- POSIX ACLs
- quotas
- fsck
- resize
- defragmentation
Mount options
when RTO retransmissions remain unacknowledged.
See tcp_retries2 for more details.
- The default value is 7.
+ The default value is 8.
If your machine is a loaded WEB server,
you should think about lowering this value, as such sockets
may consume significant resources. Cf. tcp_max_orphans.
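The tuning itself is a one-line write to procfs. Assuming the entry above is net.ipv4.tcp_orphan_retries — the cross-references to tcp_retries2 and tcp_max_orphans suggest it, but the entry name sits outside this hunk — a minimal sketch for lowering it at runtime could look like:

#include <stdio.h>

int main(void)
{
	/* path assumed from the sysctl name inferred above */
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_orphan_retries", "w");

	if (!f) {
		perror("tcp_orphan_retries");
		return 1;
	}
	/* retry orphaned sockets fewer times than the default of 8 */
	fputs("2\n", f);
	return fclose(f) ? 1 : 0;
}

The same effect is normally achieved with sysctl or an /etc/sysctl.conf entry; the C form is only meant to make the knob concrete.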
Field name: init_size
Type: read
-Offset/size: 0x25c/4
+Offset/size: 0x260/4
This field indicates the amount of linear contiguous memory starting
at the kernel runtime start address that the kernel needs before it
VERSION = 3
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Sneaky Weasel
# *DOCUMENTATION*
*/
if (have_imager()) {
label = "HD imager";
- mux |= 1;
+ mux |= 2;
/* externally mux MMC1/ENET/AIC33 to imager */
mux |= BIT(6) | BIT(5) | BIT(3);
resets &= ~BIT(1);
if (have_tvp7002()) {
- mux |= 2;
+ mux |= 1;
resets &= ~BIT(2);
label = "tvp7002 HD";
} else {
{
struct davinci_gpio_regs __iomem *g;
u32 mask = 0xffff;
+ struct davinci_gpio_controller *d;
- g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
+ d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
+ g = (struct davinci_gpio_regs __iomem *)d->regs;
/* we only care about one bank */
if (irq & 1)
if (!status)
break;
__raw_writel(status, &g->intstat);
- if (irq & 1)
- status >>= 16;
/* now demux them to the right lowlevel handler */
- n = (int)irq_get_handler_data(irq);
+ n = d->irq_base;
+ if (irq & 1) {
+ n += 16;
+ status >>= 16;
+ }
+
while (status) {
res = ffs(status);
n += res;
/* set up all irqs in this bank */
irq_set_chained_handler(bank_irq, gpio_irq_handler);
- irq_set_handler_data(bank_irq, (__force void *)g);
+
+ /*
+ * Each chip handles 32 gpios, and each irq bank consists of 16
+ * gpio irqs. Pass the irq bank's corresponding controller to
+ * the chained irq handler.
+ */
+ irq_set_handler_data(bank_irq, &chips[gpio / 32]);
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
irq_set_chip(irq, &gpio_irqchip);
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
+ if (!gc) {
+ pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+ __func__, irq_start);
+ return;
+ }
+
ct = gc->chip_types;
ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
/*
* clocksource
*/
+
+static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return *IXP4XX_OSTS;
+}
+
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);
static void __init ixp4xx_clocksource_init(void)
{
init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
- clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32,
- clocksource_mmio_readl_up);
+ clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
}
/*
return chan;
}
-int s3c2410_dma_config(unsigned int channel, int xferunit)
+int s3c2410_dma_config(enum dma_ch channel, int xferunit)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
return 0;
}
-int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
*
*/
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
EXPORT_SYMBOL(s3c2410_dma_enqueue);
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
enum s3c2410_dmasrc source,
unsigned long devaddr)
{
EXPORT_SYMBOL(s3c2410_dma_devconfig);
-int s3c2410_dma_getposition(unsigned int channel,
+int s3c2410_dma_getposition(enum dma_ch channel,
dma_addr_t *src, dma_addr_t *dst)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
* get control of a dma channel
*/
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
struct s3c2410_dma_client *client,
void *dev)
{
* allowed to go through.
*/
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
unsigned long flags;
* get control of a dma channel
*/
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
struct s3c2410_dma_client *client,
void *dev)
{
* allowed to go through.
*/
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
unsigned long flags;
}
int
-s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
* xfersize: size of unit in bytes (1,2,4)
*/
-int s3c2410_dma_config(unsigned int channel,
+int s3c2410_dma_config(enum dma_ch channel,
int xferunit)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
* devaddr: physical address of the source
*/
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
enum s3c2410_dmasrc source,
unsigned long devaddr)
{
* returns the current transfer points for the dma source and destination
*/
-int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst)
+int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
* irq?
*/
-int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
+int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
}
EXPORT_SYMBOL(s3c2410_dma_set_opfn);
-int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
+int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
}
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
-int s3c2410_dma_setflags(unsigned int channel, unsigned int flags)
+int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
};
struct s3c2410_dma_chan;
+enum dma_ch;
/* s3c2410_dma_cbfn_t
*
* request a dma channel exclusively
*/
-extern int s3c2410_dma_request(unsigned int channel,
+extern int s3c2410_dma_request(enum dma_ch channel,
struct s3c2410_dma_client *, void *dev);
* change the state of the dma channel
*/
-extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op);
+extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
/* s3c2410_dma_setflags
*
* set the channel's flags to a given state
*/
-extern int s3c2410_dma_setflags(unsigned int channel,
+extern int s3c2410_dma_setflags(enum dma_ch channel,
unsigned int flags);
/* s3c2410_dma_free
* free the dma channel (will also abort any outstanding operations)
*/
-extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
+extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
/* s3c2410_dma_enqueue
*
* drained before the buffer is given to the DMA system.
*/
-extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
+extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size);
/* s3c2410_dma_config
* configure the dma channel
*/
-extern int s3c2410_dma_config(unsigned int channel, int xferunit);
+extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
/* s3c2410_dma_devconfig
*
* configure the device we're talking to
*/
-extern int s3c2410_dma_devconfig(unsigned int channel,
+extern int s3c2410_dma_devconfig(enum dma_ch channel,
enum s3c2410_dmasrc source, unsigned long devaddr);
/* s3c2410_dma_getposition
* get the position that the dma transfer is currently at
*/
-extern int s3c2410_dma_getposition(unsigned int channel,
+extern int s3c2410_dma_getposition(enum dma_ch channel,
dma_addr_t *src, dma_addr_t *dest);
-extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn);
-extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn);
+extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
+extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
handle_level_irq);
+
+ if (!gc) {
+ pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+ __func__, uirq->base_irq);
+ return;
+ }
+
ct = gc->chip_types;
ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_set_bit;
s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
S3C64XX_TINT_CSTAT, handle_level_irq);
+
+ if (!s3c_tgc) {
+ pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
+ __func__, timer_irq);
+ return;
+ }
+
ct = s3c_tgc->chip_types;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/irq.h>
#include <asm/i8259.h>
}
}
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
{
if (i8259A_auto_eoi >= 0)
init_8259A(i8259A_auto_eoi);
- return 0;
}
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
{
/* Put the i8259A into a quiescent state that
* the kernel initialization code can get it
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
}
- return 0;
}
-static struct sysdev_class i8259_sysdev_class = {
- .name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
};
-static struct sys_device device_i8259A = {
- .id = 0,
- .cls = &i8259_sysdev_class,
-};
-
static int __init i8259A_init_sysfs(void)
{
- int error = sysdev_class_register(&i8259_sysdev_class);
- if (!error)
- error = sysdev_register(&device_i8259A);
- return error;
+ register_syscore_ops(&i8259_syscore_ops);
+ return 0;
}
device_initcall(i8259A_init_sysfs);
extern unsigned long arch_local_irq_save(void);
extern void arch_local_irq_enable(void);
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
unsigned long flags;
return flags;
}
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
{
return (flags & PSR_PIL) != 0;
}
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
#ifndef __ASSEMBLY__
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
unsigned long flags;
return flags;
}
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__asm__ __volatile__(
"wrpr %0, %%pil"
);
}
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"wrpr %0, %%pil"
);
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
__asm__ __volatile__(
"wrpr 0, %%pil"
);
}
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
{
return (flags > 0);
}
-static inline int arch_irqs_disabled(void)
+static inline notrace int arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags, tmp;
} while (0)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
+#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
#ifdef CONFIG_SMP
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
- sll %o3, 28, %o2 ! shift for simpler checks below
+ srl %o3, 28, %o2 ! shift for simpler checks below
maybe_smp4m_msg_check_single:
andcc %o2, 0x1, %g0
beq,a maybe_smp4m_msg_check_mask
* Leon2 and Leon3 differ in their way of telling cache information
*
*/
-int leon_flush_needed(void)
+int __init leon_flush_needed(void)
{
int flush_needed = -1;
unsigned int ssize, sets;
config AMD_NUMA
def_bool y
prompt "Old style AMD Opteron NUMA detection"
- depends on NUMA && PCI
+ depends on X86_64 && NUMA && PCI
---help---
Enable AMD NUMA node topology detection. You should say Y here if
you have a multi processor AMD system. This uses an old method to
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
+ { /* Handle problems with rebooting on the Latitude E6320. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6320",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+ },
+ },
+ { /* Handle problems with rebooting on the Latitude E5420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E5420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+ },
+ },
+ { /* Handle problems with rebooting on the Latitude E6420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ },
+ },
{ }
};
{
struct platform_device *ghes_dev;
struct ghes_arr *ghes_arr = data;
- int rc;
+ int rc, i;
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
return 0;
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
return 0;
+ for (i = 0; i < ghes_arr->count; i++) {
+ struct acpi_hest_header *hdr;
+ ghes_dev = ghes_arr->ghes_devs[i];
+ hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
+ if (hdr->source_id == hest_hdr->source_id) {
+ pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+ hdr->source_id);
+ return -EIO;
+ }
+ }
ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
if (!ghes_dev)
return -ENOMEM;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
-/*
- * Create and initialize a spinlock.
- */
-acpi_status
-acpi_os_create_lock(acpi_spinlock *out_handle)
-{
- spinlock_t *lock;
-
- lock = ACPI_ALLOCATE(sizeof(spinlock_t));
- if (!lock)
- return AE_NO_MEMORY;
- spin_lock_init(lock);
- *out_handle = lock;
-
- return AE_OK;
-}
-
/*
* Deallocate the memory for a spinlock.
*/
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
break;
case WM831X_GPIO_PULL_UP:
pull = "pullup";
+ break;
default:
pull = "INVALID PULL";
break;
};
#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
struct intel_fbdev;
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode);
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
}
static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
- uint32_t size;
+ uint32_t gtt_size;
if (INTEL_INFO(dev)->gen >= 4 ||
- obj->tiling_mode == I915_TILING_NONE)
- return obj->base.size;
+ tiling_mode == I915_TILING_NONE)
+ return size;
/* Previous chips need a power-of-two fence region when tiling */
if (INTEL_INFO(dev)->gen == 3)
- size = 1024*1024;
+ gtt_size = 1024*1024;
else
- size = 512*1024;
+ gtt_size = 512*1024;
- while (size < obj->base.size)
- size <<= 1;
+ while (gtt_size < size)
+ gtt_size <<= 1;
- return size;
+ return gtt_size;
}
/**
* potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
-
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (INTEL_INFO(dev)->gen >= 4 ||
- obj->tiling_mode == I915_TILING_NONE)
+ tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- return i915_gem_get_gtt_size(obj);
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
/**
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
* unfenced object
- * @obj: object to check
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
*
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
- int tile_height;
-
/*
* Minimum alignment is 4k (GTT page size) for sane hw.
*/
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
- obj->tiling_mode == I915_TILING_NONE)
+ tiling_mode == I915_TILING_NONE)
return 4096;
- /*
- * Older chips need unfenced tiled buffers to be aligned to the left
- * edge of an even tile row (where tile rows are counted as if the bo is
- * placed in a fenced gtt region).
+ /* Previous hardware however needs to be aligned to a power-of-two
+ * tile height. The simplest method for determining this is to reuse
+ * the power-of-two object size.
*/
- if (IS_GEN2(dev))
- tile_height = 16;
- else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_height = 32;
- else
- tile_height = 8;
-
- return tile_height * obj->stride * 2;
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
int
return -EINVAL;
}
- fence_size = i915_gem_get_gtt_size(obj);
- fence_alignment = i915_gem_get_gtt_alignment(obj);
- unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(obj);
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ args->tiling_mode);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+ return dev_priv->lvds_use_ssc && i915_panel_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
- gb_backend_map = 0x00006420;
+ gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
}
-/* emits 36 */
+/* emits 39 */
static void
set_default_state(struct radeon_device *rdev)
{
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0);
+ /* setup LDS */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0x10001000);
+
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
- ring_size += 52; /* shaders + def state */
+ ring_size += 55; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(RV370_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(RV370_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ if (rdev->flags & RADEON_IS_PCIE)
+ bus_cntl = RREG32(RV370_BUS_CNTL);
+ else
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
crtc2_gen_cntl = 0;
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+ else
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
/* Turn off mem requests and CRTC for both controllers */
WREG32(RADEON_CRTC_GEN_CNTL,
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, bus_cntl);
+ else
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* bail if the connector does not have hpd pin, e.g.,
+ * VGA, TV, etc.
+ */
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+ return;
+
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
/* powering up/down the eDP panel generates hpd events which
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+#define RV370_BUS_CNTL 0x004c
+# define RV370_BUS_BIOS_DIS_ROM (1 << 2)
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
#define RADEON_MSI_REARM_EN 0x0160
# define RV370_MSI_REARM_EN (1 << 0)
return radeon_gart_table_vram_alloc(rdev);
}
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
- tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
- WREG32(R_00004C_BUS_CNTL, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
(S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
info->direct[PSC_VOLTAGE_IN] = true;
info->direct[PSC_VOLTAGE_OUT] = true;
info->direct[PSC_CURRENT_OUT] = true;
- info->m[PSC_CURRENT_OUT] = 800;
+ info->m[PSC_CURRENT_OUT] = 807;
info->b[PSC_CURRENT_OUT] = 20475;
info->R[PSC_CURRENT_OUT] = -1;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
if (config & ADM1275_VRANGE) {
- info->m[PSC_VOLTAGE_IN] = 19045;
+ info->m[PSC_VOLTAGE_IN] = 19199;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -2;
- info->m[PSC_VOLTAGE_OUT] = 19045;
+ info->m[PSC_VOLTAGE_OUT] = 19199;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -2;
} else {
- info->m[PSC_VOLTAGE_IN] = 6666;
+ info->m[PSC_VOLTAGE_IN] = 6720;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -1;
- info->m[PSC_VOLTAGE_OUT] = 6666;
+ info->m[PSC_VOLTAGE_OUT] = 6720;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -1;
}
else
err = -EIO;
+ ACPI_FREE(ret);
return err;
}
};
static const struct attribute_group it87_group_label = {
- .attrs = it87_attributes_vid,
+ .attrs = it87_attributes_label,
};
/* SuperIO detection - will change isa_address if a chip is found */
struct spi_transfer xfer[2];
uint8_t *tx_buf;
uint8_t *rx_buf;
+ struct mutex drvdata_lock;
+ /* protect msg, xfer and buffers from multiple access */
};
static int max1111_read(struct device *dev, int channel)
uint8_t v1, v2;
int err;
+ /* writing to drvdata struct is not thread safe, wait on mutex */
+ mutex_lock(&data->drvdata_lock);
+
data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) |
MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
err = spi_sync(data->spi, &data->msg);
if (err < 0) {
dev_err(dev, "spi_sync failed with %d\n", err);
+ mutex_unlock(&data->drvdata_lock);
return err;
}
v1 = data->rx_buf[0];
v2 = data->rx_buf[1];
+ mutex_unlock(&data->drvdata_lock);
+
if ((v1 & 0xc0) || (v2 & 0x3f))
return -EINVAL;
if (err)
goto err_free_data;
+ mutex_init(&data->drvdata_lock);
+
data->spi = spi;
spi_set_drvdata(spi, data);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+ mutex_destroy(&data->drvdata_lock);
kfree(data->rx_buf);
kfree(data->tx_buf);
kfree(data);
* Convert linear sensor values to milli- or micro-units
* depending on sensor type.
*/
-static int pmbus_reg2data_linear(struct pmbus_data *data,
- struct pmbus_sensor *sensor)
+static long pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
s16 exponent;
s32 mantissa;
else
val >>= -exponent;
- return (int)val;
+ return val;
}
/*
* Convert direct sensor values to milli- or micro-units
* depending on sensor type.
*/
-static int pmbus_reg2data_direct(struct pmbus_data *data,
- struct pmbus_sensor *sensor)
+static long pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
long val = (s16) sensor->data;
long m, b, R;
R++;
}
- return (int)((val - b) / m);
+ return (val - b) / m;
}
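The function above inverts the PMBus DIRECT encoding, which (per the PMBus specification) maps a real value X to a register value Y = (m * X + b) * 10^R, so X = (Y * 10^-R - b) / m; the driver then additionally scales the result to milli- or micro-units as noted in the comment. A self-contained sketch of just that inversion, using the ADM1275 current-output coefficients from the hunk above (m = 807, b = 20475, R = -1) as example values:

#include <stdio.h>

/* Invert Y = (m * X + b) * 10^R; the milli-/micro-unit scaling that
 * pmbus_reg2data_direct() applies on top of this is left out here. */
static double pmbus_direct_to_real(long y, long m, long b, int R)
{
	double val = y;

	for (; R > 0; R--)	/* apply 10^-R */
		val /= 10;
	for (; R < 0; R++)
		val *= 10;

	return (val - b) / (double)m;
}

int main(void)
{
	/* 22000 is an arbitrary raw register value used for illustration */
	printf("%f\n", pmbus_direct_to_real(22000, 807, 20475, -1));
	return 0;
}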
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
- int val;
+ long val;
if (data->info->direct[sensor->class])
val = pmbus_reg2data_direct(data, sensor);
if (!s1 && !s2)
*val = !!regval;
else {
- int v1, v2;
+ long v1, v2;
struct pmbus_sensor *sensor1, *sensor2;
sensor1 = &data->sensors[s1];
if (sensor->data < 0)
return sensor->data;
- return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+ return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
}
static ssize_t pmbus_set_sensor(struct device *dev,
if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
goto err0;
+
+ /* If we took control of the bus, we need to force
+ reinitialization. This is because many ts_bus_ctrl()
+ functions strobe the RESET pin on the demod, and if the
+ frontend thread already exists then the dvb_init() routine
+ won't get called (which is what usually does initial
+ register configuration). */
+ fepriv->reinitialise = 1;
}
if ((ret = dvb_generic_open (inode, file)) < 0)
config RADIO_MIROPCM20
tristate "miroSOUND PCM20 radio"
- depends on ISA && VIDEO_V4L2 && SND
+ depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
select SND_ISA
select SND_MIRO
---help---
config RADIO_SF16FMR2
tristate "SF16FMR2 Radio"
- depends on ISA && VIDEO_V4L2
+ depends on ISA && VIDEO_V4L2 && SND
---help---
Choose Y here if you have one of these FM radio cards.
char ps_name[MAX_RDS_PS_NAME + 1];
len = control->size - 1;
- if (len > MAX_RDS_PS_NAME) {
+ if (len < 0 || len > MAX_RDS_PS_NAME) {
rval = -ERANGE;
goto exit;
}
char radio_text[MAX_RDS_RADIO_TEXT + 1];
len = control->size - 1;
- if (len > MAX_RDS_RADIO_TEXT) {
+ if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
rval = -ERANGE;
goto exit;
}
inout, data1);
break;
case MCE_CMD_S_TIMEOUT:
- /* value is in units of 50us, so x*50/100 or x/2 ms */
+ /* value is in units of 50us, so x*50/1000 ms */
dev_info(dev, "%s receive timeout of %d ms\n",
- inout, ((data1 << 8) | data2) / 2);
+ inout,
+ ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
break;
case MCE_CMD_G_TIMEOUT:
dev_info(dev, "Get receive timeout\n");
switch (ir->buf_in[index]) {
/* 2-byte return value commands */
case MCE_CMD_S_TIMEOUT:
- ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
+ ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
break;
/* 1-byte return value commands */
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->timeout = US_TO_NS(1000);
+ rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
rc->s_tx_carrier = mceusb_set_tx_carrier;
rdev->dev.parent = &pdev->dev;
rdev->driver_name = NVT_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
- rdev->timeout = US_TO_NS(1000);
+ rdev->timeout = MS_TO_NS(100);
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
goto fail_irq;
}
- if (!pci_enable_msi(pci_dev))
- err = request_irq(pci_dev->irq, cx23885_irq,
- IRQF_DISABLED, dev->name, dev);
- else
- err = request_irq(pci_dev->irq, cx23885_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ err = request_irq(pci_dev->irq, cx23885_irq,
+ IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
/* unregister stuff */
free_irq(pci_dev->irq, dev);
- pci_disable_msi(pci_dev);
cx23885_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
* returns 0.
* This function is needed for boards that have a separate tuner for
* radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ * select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ * be used to represent a Digital TV too.
*/
static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
- if ((1 << mode & t->mode_mask) == 0)
+ int t_mode;
+ if (mode == V4L2_TUNER_RADIO)
+ t_mode = T_RADIO;
+ else
+ t_mode = T_ANALOG_TV;
+
+ if ((t_mode & t->mode_mask) == 0)
return -EINVAL;
return 0;
case V4L2_TUNER_RADIO:
p = "radio";
break;
- case V4L2_TUNER_DIGITAL_TV:
+ case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
p = "digital TV";
break;
case V4L2_TUNER_ANALOG_TV:
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (vt->type == V4L2_TUNER_ANALOG_TV)
+ if (t->mode != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
- if (vt->type != V4L2_TUNER_RADIO) {
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
return 0;
return 0;
/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
- int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
- if (ext_csd_struct > 2) {
+ if (card->ext_csd.raw_ext_csd_structure > 2) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
- ext_csd_struct);
+ card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
goto out;
}
goto out;
}
+ card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+ card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+ card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+ card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
if (card->ext_csd.rev >= 2) {
card->ext_csd.sectors =
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
-
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
mmc_hostname(card->host));
}
+ card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.raw_erase_timeout_mult =
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.raw_hc_erase_grp_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
}
+ card->ext_csd.raw_hc_erase_gap_size =
+ ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+ card->ext_csd.raw_sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.raw_sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.raw_sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
if (card->ext_csd.rev >= 4) {
/*
* Enhanced area feature support -- check whether the eMMC
* area offset and size to user by adding sysfs interface.
*/
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
- (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
u8 hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
u8 hc_wp_grp_sz =
}
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
- unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
u8 *bw_ext_csd;
int err;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
err = mmc_get_ext_csd(card, &bw_ext_csd);
- if (err)
- return err;
- if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+ if (err || bw_ext_csd == NULL) {
if (bus_width != MMC_BUS_WIDTH_1)
err = -EINVAL;
goto out;
goto out;
/* only compare read only fields */
- err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+ err = (!(card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
- (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+ (card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
- (ext_csd[EXT_CSD_REV] ==
+ (card->ext_csd.rev ==
bw_ext_csd[EXT_CSD_REV]) &&
- (ext_csd[EXT_CSD_STRUCTURE] ==
+ (card->ext_csd.raw_ext_csd_structure ==
bw_ext_csd[EXT_CSD_STRUCTURE]) &&
- (ext_csd[EXT_CSD_CARD_TYPE] ==
+ (card->ext_csd.raw_card_type ==
bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
- (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+ (card->ext_csd.raw_s_a_timeout ==
bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
- (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_gap_size ==
bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+ (card->ext_csd.raw_erase_timeout_mult ==
bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
- (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_grp_size ==
bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+ (card->ext_csd.raw_sec_trim_mult ==
bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
- (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+ (card->ext_csd.raw_sec_erase_mult ==
bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
- (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+ (card->ext_csd.raw_sec_feature_support ==
bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
- (ext_csd[EXT_CSD_TRIM_MULT] ==
+ (card->ext_csd.raw_trim_mult ==
bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
- memcmp(&ext_csd[EXT_CSD_SEC_CNT],
- &bw_ext_csd[EXT_CSD_SEC_CNT],
- 4) != 0);
+ (card->ext_csd.raw_sectors[0] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+ (card->ext_csd.raw_sectors[1] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+ (card->ext_csd.raw_sectors[2] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+ (card->ext_csd.raw_sectors[3] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
if (err)
err = -EINVAL;
*/
if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
err = mmc_compare_ext_csds(card,
- ext_csd,
bus_width);
else
err = mmc_bus_test(card, bus_width);
return features;
}
-#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
- NETIF_F_SOFT_FEATURES | \
- NETIF_F_LRO)
+#define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
static void bond_compute_features(struct bonding *bond)
{
return 0;
}
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs;
+ u32 tempval;
+
+ regs = priv->gfargrp[0].regs;
+
+ tempval = gfar_read(&regs->rctrl);
+ /* If parse is no longer required, then disable parser */
+ if (tempval & RCTRL_REQ_PARSER)
+ tempval |= RCTRL_PRSDEP_INIT;
+ else
+ tempval &= ~RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, tempval);
+}
+
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
/* Disable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
tempval &= ~RCTRL_VLEX;
- /* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER)
- tempval |= RCTRL_PRSDEP_INIT;
- else
- tempval &= ~RCTRL_PRSDEP_INIT;
gfar_write(&regs->rctrl, tempval);
+
+ gfar_check_rx_parser_mode(priv);
}
gfar_change_mtu(dev, dev->mtu);
#define RCTRL_PROM 0x00000008
#define RCTRL_EMEN 0x00000002
#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
- RCTRL_TUCSEN)
+ RCTRL_TUCSEN | RCTRL_FILREN)
#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
RCTRL_PRSDEP_INIT)
#define RCTRL_EXTHASH (RCTRL_GHTX)
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
extern const struct ethtool_ops gfar_ethtool_ops;
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_dma[i], buflen,
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
/* Only look at sockets that are using this specific device. */
switch (event) {
+ case NETDEV_CHANGEADDR:
case NETDEV_CHANGEMTU:
- /* A change in mtu is a bad thing, requiring
+ /* A change in mtu or address is a bad thing, requiring
* LCP re-negotiation.
*/
if (status & RX_FIFO_FULL)
dev->stats.rx_fifo_errors++;
- /* Mask off RX interrupt */
- misr &= ~RX_INTS;
- napi_schedule(&lp->napi);
+ if (likely(napi_schedule_prep(&lp->napi))) {
+ /* Mask off RX interrupt */
+ misr &= ~RX_INTS;
+ __napi_schedule(&lp->napi);
+ }
}
/* TX interrupt request */
#ifdef SL_INCLUDE_CSLIP
cbuff = xchg(&sl->cbuff, cbuff);
slcomp = xchg(&sl->slcomp, slcomp);
+#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
sl->xdata = 0;
sl->xbits = 0;
-#endif
#endif
spin_unlock_bh(&sl->lock);
err = 0;
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- pr_debug("tdes0=%x\n", tdes0);
if (tdes0 & 0x80000000)
break;
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- pr_debug("tdes0=%x\n", tdes0);
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- pr_debug("rdes0: %x\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
else /* DM9102/DM9102A */
phy_mode = phy_read(db->ioaddr,
db->phy_addr, 17, db->chip_id) & 0xf000;
- pr_debug("Phy_mode %x\n", phy_mode);
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
remove_net_device(hso_net->parent);
- if (hso_net->net) {
+ if (hso_net->net)
unregister_netdev(hso_net->net);
- free_netdev(hso_net->net);
- }
/* start freeing */
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
kfree(hso_net->mux_bulk_tx_buf);
hso_net->mux_bulk_tx_buf = NULL;
+ if (hso_net->net)
+ free_netdev(hso_net->net);
+
kfree(hso_dev);
}
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
ath5k_led_off(sc);
return 0;
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
\
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
int val; \
\
val = (int)simple_strtoul(buf, NULL, 10); \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
* TODO - this could be improved to be dependent on the rate.
* The hardware can keep up at lower rates, but not higher rates
*/
- if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+ if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+ !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
ndelim += ATH_AGGR_ENCRYPTDELIM;
/*
{ USB_DEVICE(0x04bb, 0x093f) },
/* NEC WL300NU-G */
{ USB_DEVICE(0x0409, 0x0249) },
+ /* NEC WL300NU-AG */
+ { USB_DEVICE(0x0409, 0x02b4) },
/* AVM FRITZ!WLAN USB Stick N */
{ USB_DEVICE(0x057c, 0x8401) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
+ ssb_pcicore_fix_sprom_core_index(pc);
+
/* Disable PCI interrupts. */
ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+ /* Additional PCIe always once-executed workarounds */
+ if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+ ssb_pcicore_serdes_workaround(pc);
+ /* TODO: ASPM */
+ /* TODO: Clock Request Update */
+ }
}
void ssb_pcicore_init(struct ssb_pcicore *pc)
if (!ssb_device_is_enabled(dev))
ssb_device_enable(dev, 0);
- ssb_pcicore_fix_sprom_core_index(pc);
-
#ifdef CONFIG_SSB_PCICORE_HOSTMODE
pc->hostmode = pcicore_is_in_hostmode(pc);
if (pc->hostmode)
#endif /* CONFIG_SSB_PCICORE_HOSTMODE */
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
-
- /* Additional PCIe always once-executed workarounds */
- if (dev->id.coreid == SSB_DEV_PCIE) {
- ssb_pcicore_serdes_workaround(pc);
- /* TODO: ASPM */
- /* TODO: Clock Request Update */
- }
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
- depends on X86
- default m
+ depends on X86 && PCI
help
A software monitoring watchdog and NMI sourcing driver. This driver
will detect lockups and provide a stack trace. This is a driver that
*/
struct btrfs_key location;
+ /* Lock for counters */
+ spinlock_t lock;
+
/* the extent_tree has caches of all the extent mappings to disk */
struct extent_map_tree extent_tree;
* items we think we'll end up using, and reserved_extents is the number
* of extent items we've reserved metadata for.
*/
- atomic_t outstanding_extents;
- atomic_t reserved_extents;
+ unsigned outstanding_extents;
+ unsigned reserved_extents;
/*
* ordered_data_close is set by truncate when a file that used
BTRFS_I(inode)->disk_i_size = size;
}
+static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
+ struct inode *inode)
+{
+ if (root == root->fs_info->tree_root ||
+ BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+ return true;
+ return false;
+}
+
#endif
{
int i;
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- if (p->nodes[i] && p->locks[i])
- btrfs_set_lock_blocking(p->nodes[i]);
+ if (!p->nodes[i] || !p->locks[i])
+ continue;
+ btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
+ if (p->locks[i] == BTRFS_READ_LOCK)
+ p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+ else if (p->locks[i] == BTRFS_WRITE_LOCK)
+ p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
}
}
* for held
*/
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held)
+ struct extent_buffer *held, int held_rw)
{
int i;
* really sure by forcing the path to blocking before we clear
* the path blocking.
*/
- if (held)
- btrfs_set_lock_blocking(held);
+ if (held) {
+ btrfs_set_lock_blocking_rw(held, held_rw);
+ if (held_rw == BTRFS_WRITE_LOCK)
+ held_rw = BTRFS_WRITE_LOCK_BLOCKING;
+ else if (held_rw == BTRFS_READ_LOCK)
+ held_rw = BTRFS_READ_LOCK_BLOCKING;
+ }
btrfs_set_path_blocking(p);
#endif
for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
- if (p->nodes[i] && p->locks[i])
- btrfs_clear_lock_blocking(p->nodes[i]);
+ if (p->nodes[i] && p->locks[i]) {
+ btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
+ if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
+ p->locks[i] = BTRFS_WRITE_LOCK;
+ else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
+ p->locks[i] = BTRFS_READ_LOCK;
+ }
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (held)
- btrfs_clear_lock_blocking(held);
+ btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
if (!p->nodes[i])
continue;
if (p->locks[i]) {
- btrfs_tree_unlock(p->nodes[i]);
+ btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
p->locks[i] = 0;
}
free_extent_buffer(p->nodes[i]);
return eb;
}
+/* loop around taking references on and locking the root node of the
+ * tree until you end up with a lock on the root. A locked buffer
+ * is returned, with a reference held.
+ */
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+{
+ struct extent_buffer *eb;
+
+ while (1) {
+ eb = btrfs_root_node(root);
+ btrfs_tree_read_lock(eb);
+ if (eb == root->node)
+ break;
+ btrfs_tree_read_unlock(eb);
+ free_extent_buffer(eb);
+ }
+ return eb;
+}
+
/* cowonly root (everything not a reference counted cow subvolume), just get
* put onto a simple dirty list. transaction.c walks this to make sure they
* get properly updated on disk.
for (i = start_slot; i < end_slot; i++) {
int close = 1;
- if (!parent->map_token) {
- map_extent_buffer(parent,
- btrfs_node_key_ptr_offset(i),
- sizeof(struct btrfs_key_ptr),
- &parent->map_token, &parent->kaddr,
- &parent->map_start, &parent->map_len,
- KM_USER1);
- }
btrfs_node_key(parent, &disk_key, i);
if (!progress_passed && comp_keys(&disk_key, progress) < 0)
continue;
last_block = blocknr;
continue;
}
- if (parent->map_token) {
- unmap_extent_buffer(parent, parent->map_token,
- KM_USER1);
- parent->map_token = NULL;
- }
cur = btrfs_find_tree_block(root, blocknr, blocksize);
if (cur)
btrfs_tree_unlock(cur);
free_extent_buffer(cur);
}
- if (parent->map_token) {
- unmap_extent_buffer(parent, parent->map_token,
- KM_USER1);
- parent->map_token = NULL;
- }
return err;
}
struct btrfs_disk_key *tmp = NULL;
struct btrfs_disk_key unaligned;
unsigned long offset;
- char *map_token = NULL;
char *kaddr = NULL;
unsigned long map_start = 0;
unsigned long map_len = 0;
mid = (low + high) / 2;
offset = p + mid * item_size;
- if (!map_token || offset < map_start ||
+ if (!kaddr || offset < map_start ||
(offset + sizeof(struct btrfs_disk_key)) >
map_start + map_len) {
- if (map_token) {
- unmap_extent_buffer(eb, map_token, KM_USER0);
- map_token = NULL;
- }
err = map_private_extent_buffer(eb, offset,
sizeof(struct btrfs_disk_key),
- &map_token, &kaddr,
- &map_start, &map_len, KM_USER0);
+ &kaddr, &map_start, &map_len);
if (!err) {
tmp = (struct btrfs_disk_key *)(kaddr + offset -
high = mid;
else {
*slot = mid;
- if (map_token)
- unmap_extent_buffer(eb, map_token, KM_USER0);
return 0;
}
}
*slot = low;
- if (map_token)
- unmap_extent_buffer(eb, map_token, KM_USER0);
return 1;
}
mid = path->nodes[level];
- WARN_ON(!path->locks[level]);
+ WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
+ path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
WARN_ON(btrfs_header_generation(mid) != trans->transid);
orig_ptr = btrfs_node_blockptr(mid, orig_slot);
u32 nr;
u32 blocksize;
u32 nscan = 0;
- bool map = true;
if (level != 1)
return;
nritems = btrfs_header_nritems(node);
nr = slot;
- if (node->map_token || path->skip_locking)
- map = false;
while (1) {
- if (map && !node->map_token) {
- unsigned long offset = btrfs_node_key_ptr_offset(nr);
- map_private_extent_buffer(node, offset,
- sizeof(struct btrfs_key_ptr),
- &node->map_token,
- &node->kaddr,
- &node->map_start,
- &node->map_len, KM_USER1);
- }
if (direction < 0) {
if (nr == 0)
break;
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
gen = btrfs_node_ptr_generation(node, nr);
- if (map && node->map_token) {
- unmap_extent_buffer(node, node->map_token,
- KM_USER1);
- node->map_token = NULL;
- }
readahead_tree_block(root, search, blocksize, gen);
nread += blocksize;
}
if ((nread > 65536 || nscan > 32))
break;
}
- if (map && node->map_token) {
- unmap_extent_buffer(node, node->map_token, KM_USER1);
- node->map_token = NULL;
- }
}
/*
t = path->nodes[i];
if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
- btrfs_tree_unlock(t);
+ btrfs_tree_unlock_rw(t, path->locks[i]);
path->locks[i] = 0;
}
}
continue;
if (!path->locks[i])
continue;
- btrfs_tree_unlock(path->nodes[i]);
+ btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
path->locks[i] = 0;
}
}
* we can trust our generation number
*/
free_extent_buffer(tmp);
+ btrfs_set_path_blocking(p);
+
tmp = read_tree_block(root, blocknr, blocksize, gen);
if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
*eb_ret = tmp;
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *p,
- struct extent_buffer *b, int level, int ins_len)
+ struct extent_buffer *b, int level, int ins_len,
+ int *write_lock_level)
{
int ret;
if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
int sret;
+ if (*write_lock_level < level + 1) {
+ *write_lock_level = level + 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
sret = reada_for_balance(root, p, level);
if (sret)
goto again;
btrfs_set_path_blocking(p);
sret = split_node(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL);
+ btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(sret > 0);
if (sret) {
BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
int sret;
+ if (*write_lock_level < level + 1) {
+ *write_lock_level = level + 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
sret = reada_for_balance(root, p, level);
if (sret)
goto again;
btrfs_set_path_blocking(p);
sret = balance_level(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL);
+ btrfs_clear_path_blocking(p, NULL, 0);
if (sret) {
ret = sret;
int err;
int level;
int lowest_unlock = 1;
+ int root_lock;
+ /* everything at write_lock_level or lower must be write locked */
+ int write_lock_level = 0;
u8 lowest_level = 0;
lowest_level = p->lowest_level;
WARN_ON(lowest_level && ins_len > 0);
WARN_ON(p->nodes[0] != NULL);
- if (ins_len < 0)
+ if (ins_len < 0) {
lowest_unlock = 2;
+ /* when we are removing items, we might have to go up to level
+ * two as we update tree pointers. Make sure we keep write
+ * locks for those levels as well
+ */
+ write_lock_level = 2;
+ } else if (ins_len > 0) {
+ /*
+ * for inserting items, make sure we have a write lock on
+ * level 1 so we can update keys
+ */
+ write_lock_level = 1;
+ }
+
+ if (!cow)
+ write_lock_level = -1;
+
+ if (cow && (p->keep_locks || p->lowest_level))
+ write_lock_level = BTRFS_MAX_LEVEL;
+
again:
+ /*
+ * we try very hard to do read locks on the root
+ */
+ root_lock = BTRFS_READ_LOCK;
+ level = 0;
if (p->search_commit_root) {
+ /*
+ * the commit roots are read only
+ * so we always do read locks
+ */
b = root->commit_root;
extent_buffer_get(b);
+ level = btrfs_header_level(b);
if (!p->skip_locking)
- btrfs_tree_lock(b);
+ btrfs_tree_read_lock(b);
} else {
- if (p->skip_locking)
+ if (p->skip_locking) {
b = btrfs_root_node(root);
- else
- b = btrfs_lock_root_node(root);
+ level = btrfs_header_level(b);
+ } else {
+ /* we don't know the level of the root node
+ * until we actually have it read locked
+ */
+ b = btrfs_read_lock_root_node(root);
+ level = btrfs_header_level(b);
+ if (level <= write_lock_level) {
+ /* whoops, must trade for write lock */
+ btrfs_tree_read_unlock(b);
+ free_extent_buffer(b);
+ b = btrfs_lock_root_node(root);
+ root_lock = BTRFS_WRITE_LOCK;
+
+ /* the level might have changed, check again */
+ level = btrfs_header_level(b);
+ }
+ }
}
+ p->nodes[level] = b;
+ if (!p->skip_locking)
+ p->locks[level] = root_lock;
while (b) {
level = btrfs_header_level(b);
* setup the path here so we can release it under lock
* contention with the cow code
*/
- p->nodes[level] = b;
- if (!p->skip_locking)
- p->locks[level] = 1;
-
if (cow) {
/*
* if we don't really need to cow this block
btrfs_set_path_blocking(p);
+ /*
+ * must have write locks on this node and the
+ * parent
+ */
+ if (level + 1 > write_lock_level) {
+ write_lock_level = level + 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
err = btrfs_cow_block(trans, root, b,
p->nodes[level + 1],
p->slots[level + 1], &b);
BUG_ON(!cow && ins_len);
p->nodes[level] = b;
- if (!p->skip_locking)
- p->locks[level] = 1;
-
- btrfs_clear_path_blocking(p, NULL);
+ btrfs_clear_path_blocking(p, NULL, 0);
/*
* we have a lock on b and as long as we aren't changing
}
p->slots[level] = slot;
err = setup_nodes_for_search(trans, root, p, b, level,
- ins_len);
+ ins_len, &write_lock_level);
if (err == -EAGAIN)
goto again;
if (err) {
b = p->nodes[level];
slot = p->slots[level];
+ /*
+ * slot 0 is special, if we change the key
+ * we have to update the parent pointer
+ * which means we must have a write lock
+ * on the parent
+ */
+ if (slot == 0 && cow &&
+ write_lock_level < level + 1) {
+ write_lock_level = level + 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
unlock_up(p, level, lowest_unlock);
if (level == lowest_level) {
}
if (!p->skip_locking) {
- btrfs_clear_path_blocking(p, NULL);
- err = btrfs_try_spin_lock(b);
-
- if (!err) {
- btrfs_set_path_blocking(p);
- btrfs_tree_lock(b);
- btrfs_clear_path_blocking(p, b);
+ level = btrfs_header_level(b);
+ if (level <= write_lock_level) {
+ err = btrfs_try_tree_write_lock(b);
+ if (!err) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_lock(b);
+ btrfs_clear_path_blocking(p, b,
+ BTRFS_WRITE_LOCK);
+ }
+ p->locks[level] = BTRFS_WRITE_LOCK;
+ } else {
+ err = btrfs_try_tree_read_lock(b);
+ if (!err) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_read_lock(b);
+ btrfs_clear_path_blocking(p, b,
+ BTRFS_READ_LOCK);
+ }
+ p->locks[level] = BTRFS_READ_LOCK;
}
+ p->nodes[level] = b;
}
} else {
p->slots[level] = slot;
if (ins_len > 0 &&
btrfs_leaf_free_space(root, b) < ins_len) {
+ if (write_lock_level < 1) {
+ write_lock_level = 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
btrfs_set_path_blocking(p);
err = split_leaf(trans, root, key,
p, ins_len, ret == 0);
- btrfs_clear_path_blocking(p, NULL);
+ btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(err > 0);
if (err) {
add_root_to_dirty_list(root);
extent_buffer_get(c);
path->nodes[level] = c;
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_WRITE_LOCK;
path->slots[level] = 0;
return 0;
}
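The hunks above convert btrfs_search_slot to take read locks optimistically and only escalate (by raising write_lock_level, releasing the path and jumping back to again:) once a level actually needs modification. Below is a minimal userspace model of that escalate-and-retry pattern; it is not the kernel code, and needs_write() is a hypothetical stand-in for the real triggers (cow, slot-0 key updates, leaf splits).

/* Minimal model (not the kernel code): descend taking read locks, and when a
 * level turns out to need modification, raise write_lock_level and retry the
 * whole descent so that level is write locked the next time around.
 */
#include <stdio.h>

static int needs_write(int level, int ins_len)
{
	/* hypothetical: pretend an insert forces a split that touches level 2 */
	return ins_len > 0 && level == 2;
}

static void search(int root_level, int ins_len)
{
	int write_lock_level = ins_len > 0 ? 1 : (ins_len < 0 ? 2 : 0);
	int level;

again:
	for (level = root_level; level >= 0; level--) {
		int write = level <= write_lock_level;

		printf("level %d: %s lock\n", level, write ? "write" : "read");
		if (!write && needs_write(level, ins_len)) {
			/* whoops, must trade for a write lock: restart */
			write_lock_level = level;
			goto again;
		}
	}
}

int main(void)
{
	search(3, 1);
	return 0;
}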
if (path->slots[0] == i)
push_space += data_size;
- if (!left->map_token) {
- map_extent_buffer(left, (unsigned long)item,
- sizeof(struct btrfs_item),
- &left->map_token, &left->kaddr,
- &left->map_start, &left->map_len,
- KM_USER1);
- }
-
this_item_size = btrfs_item_size(left, item);
if (this_item_size + sizeof(*item) + push_space > free_space)
break;
break;
i--;
}
- if (left->map_token) {
- unmap_extent_buffer(left, left->map_token, KM_USER1);
- left->map_token = NULL;
- }
if (push_items == 0)
goto out_unlock;
push_space = BTRFS_LEAF_DATA_SIZE(root);
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(right, i);
- if (!right->map_token) {
- map_extent_buffer(right, (unsigned long)item,
- sizeof(struct btrfs_item),
- &right->map_token, &right->kaddr,
- &right->map_start, &right->map_len,
- KM_USER1);
- }
push_space -= btrfs_item_size(right, item);
btrfs_set_item_offset(right, item, push_space);
}
- if (right->map_token) {
- unmap_extent_buffer(right, right->map_token, KM_USER1);
- right->map_token = NULL;
- }
left_nritems -= push_items;
btrfs_set_header_nritems(left, left_nritems);
for (i = 0; i < nr; i++) {
item = btrfs_item_nr(right, i);
- if (!right->map_token) {
- map_extent_buffer(right, (unsigned long)item,
- sizeof(struct btrfs_item),
- &right->map_token, &right->kaddr,
- &right->map_start, &right->map_len,
- KM_USER1);
- }
if (!empty && push_items > 0) {
if (path->slots[0] < i)
push_space += this_item_size + sizeof(*item);
}
- if (right->map_token) {
- unmap_extent_buffer(right, right->map_token, KM_USER1);
- right->map_token = NULL;
- }
-
if (push_items == 0) {
ret = 1;
goto out;
u32 ioff;
item = btrfs_item_nr(left, i);
- if (!left->map_token) {
- map_extent_buffer(left, (unsigned long)item,
- sizeof(struct btrfs_item),
- &left->map_token, &left->kaddr,
- &left->map_start, &left->map_len,
- KM_USER1);
- }
ioff = btrfs_item_offset(left, item);
btrfs_set_item_offset(left, item,
ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
}
btrfs_set_header_nritems(left, old_left_nritems + push_items);
- if (left->map_token) {
- unmap_extent_buffer(left, left->map_token, KM_USER1);
- left->map_token = NULL;
- }
/* fixup right node */
if (push_items > right_nritems) {
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(right, i);
- if (!right->map_token) {
- map_extent_buffer(right, (unsigned long)item,
- sizeof(struct btrfs_item),
- &right->map_token, &right->kaddr,
- &right->map_start, &right->map_len,
- KM_USER1);
- }
-
push_space = push_space - btrfs_item_size(right, item);
btrfs_set_item_offset(right, item, push_space);
}
- if (right->map_token) {
- unmap_extent_buffer(right, right->map_token, KM_USER1);
- right->map_token = NULL;
- }
btrfs_mark_buffer_dirty(left);
if (right_nritems)
struct btrfs_item *item = btrfs_item_nr(right, i);
u32 ioff;
- if (!right->map_token) {
- map_extent_buffer(right, (unsigned long)item,
- sizeof(struct btrfs_item),
- &right->map_token, &right->kaddr,
- &right->map_start, &right->map_len,
- KM_USER1);
- }
-
ioff = btrfs_item_offset(right, item);
btrfs_set_item_offset(right, item, ioff + rt_data_off);
}
- if (right->map_token) {
- unmap_extent_buffer(right, right->map_token, KM_USER1);
- right->map_token = NULL;
- }
-
btrfs_set_header_nritems(l, mid);
ret = 0;
btrfs_item_key(right, &disk_key, 0);
u32 ioff;
item = btrfs_item_nr(leaf, i);
- if (!leaf->map_token) {
- map_extent_buffer(leaf, (unsigned long)item,
- sizeof(struct btrfs_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
- }
-
ioff = btrfs_item_offset(leaf, item);
btrfs_set_item_offset(leaf, item, ioff + size_diff);
}
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
-
/* shift the data */
if (from_end) {
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
u32 ioff;
item = btrfs_item_nr(leaf, i);
- if (!leaf->map_token) {
- map_extent_buffer(leaf, (unsigned long)item,
- sizeof(struct btrfs_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
- }
ioff = btrfs_item_offset(leaf, item);
btrfs_set_item_offset(leaf, item, ioff - data_size);
}
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
-
/* shift the data */
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end - data_size, btrfs_leaf_data(leaf) +
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
- WARN_ON(leaf->map_token);
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr(leaf, i);
- if (!leaf->map_token) {
- map_extent_buffer(leaf, (unsigned long)item,
- sizeof(struct btrfs_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
- }
-
ioff = btrfs_item_offset(leaf, item);
btrfs_set_item_offset(leaf, item, ioff - total_data);
}
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
-
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
btrfs_item_nr_offset(slot),
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
- WARN_ON(leaf->map_token);
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr(leaf, i);
- if (!leaf->map_token) {
- map_extent_buffer(leaf, (unsigned long)item,
- sizeof(struct btrfs_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
- }
-
ioff = btrfs_item_offset(leaf, item);
btrfs_set_item_offset(leaf, item, ioff - total_data);
}
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
-
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
btrfs_item_nr_offset(slot),
u32 ioff;
item = btrfs_item_nr(leaf, i);
- if (!leaf->map_token) {
- map_extent_buffer(leaf, (unsigned long)item,
- sizeof(struct btrfs_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
- }
ioff = btrfs_item_offset(leaf, item);
btrfs_set_item_offset(leaf, item, ioff + dsize);
}
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
-
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
btrfs_item_nr_offset(slot + nr),
sizeof(struct btrfs_item) *
WARN_ON(!path->keep_locks);
again:
- cur = btrfs_lock_root_node(root);
+ cur = btrfs_read_lock_root_node(root);
level = btrfs_header_level(cur);
WARN_ON(path->nodes[level]);
path->nodes[level] = cur;
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_READ_LOCK;
if (btrfs_header_generation(cur) < min_trans) {
ret = 1;
cur = read_node_slot(root, cur, slot);
BUG_ON(!cur);
- btrfs_tree_lock(cur);
+ btrfs_tree_read_lock(cur);
- path->locks[level - 1] = 1;
+ path->locks[level - 1] = BTRFS_READ_LOCK;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1);
- btrfs_clear_path_blocking(path, NULL);
+ btrfs_clear_path_blocking(path, NULL, 0);
}
out:
if (ret == 0)
u32 nritems;
int ret;
int old_spinning = path->leave_spinning;
- int force_blocking = 0;
+ int next_rw_lock = 0;
nritems = btrfs_header_nritems(path->nodes[0]);
if (nritems == 0)
return 1;
- /*
- * we take the blocks in an order that upsets lockdep. Using
- * blocking mode is the only way around it.
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- force_blocking = 1;
-#endif
-
btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
level = 1;
next = NULL;
+ next_rw_lock = 0;
btrfs_release_path(path);
path->keep_locks = 1;
-
- if (!force_blocking)
- path->leave_spinning = 1;
+ path->leave_spinning = 1;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
path->keep_locks = 0;
}
if (next) {
- btrfs_tree_unlock(next);
+ btrfs_tree_unlock_rw(next, next_rw_lock);
free_extent_buffer(next);
}
next = c;
+ next_rw_lock = path->locks[level];
ret = read_block_for_search(NULL, root, path, &next, level,
slot, &key);
if (ret == -EAGAIN)
}
if (!path->skip_locking) {
- ret = btrfs_try_spin_lock(next);
+ ret = btrfs_try_tree_read_lock(next);
if (!ret) {
btrfs_set_path_blocking(path);
- btrfs_tree_lock(next);
- if (!force_blocking)
- btrfs_clear_path_blocking(path, next);
+ btrfs_tree_read_lock(next);
+ btrfs_clear_path_blocking(path, next,
+ BTRFS_READ_LOCK);
}
- if (force_blocking)
- btrfs_set_lock_blocking(next);
+ next_rw_lock = BTRFS_READ_LOCK;
}
break;
}
level--;
c = path->nodes[level];
if (path->locks[level])
- btrfs_tree_unlock(c);
+ btrfs_tree_unlock_rw(c, path->locks[level]);
free_extent_buffer(c);
path->nodes[level] = next;
path->slots[level] = 0;
if (!path->skip_locking)
- path->locks[level] = 1;
-
+ path->locks[level] = next_rw_lock;
if (!level)
break;
}
if (!path->skip_locking) {
- btrfs_assert_tree_locked(path->nodes[level]);
- ret = btrfs_try_spin_lock(next);
+ ret = btrfs_try_tree_read_lock(next);
if (!ret) {
btrfs_set_path_blocking(path);
- btrfs_tree_lock(next);
- if (!force_blocking)
- btrfs_clear_path_blocking(path, next);
+ btrfs_tree_read_lock(next);
+ btrfs_clear_path_blocking(path, next,
+ BTRFS_READ_LOCK);
}
- if (force_blocking)
- btrfs_set_lock_blocking(next);
+ next_rw_lock = BTRFS_READ_LOCK;
}
}
ret = 0;
chunks for this space */
unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
+ unsigned int flush:1; /* set if we are trying to make space */
+
unsigned int force_alloc; /* set if we need to force a chunk
alloc for this space */
struct list_head block_groups[BTRFS_NR_RAID_TYPES];
spinlock_t lock;
struct rw_semaphore groups_sem;
- atomic_t caching_threads;
+ wait_queue_head_t wait;
};
struct btrfs_block_rsv {
struct list_head list;
struct mutex mutex;
wait_queue_head_t wait;
+ struct btrfs_work work;
struct btrfs_block_group_cache *block_group;
u64 progress;
atomic_t count;
struct btrfs_workers endio_write_workers;
struct btrfs_workers endio_freespace_worker;
struct btrfs_workers submit_workers;
+ struct btrfs_workers caching_workers;
+
/*
* fixup workers take dirty pages that didn't properly go through
* the cow mechanism and make them safe to write. It happens
/* extent-tree.c */
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
- int num_items)
+ unsigned num_items)
{
return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3 * num_items;
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
-int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- int num_items);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held);
+ struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
}
/* reset all the locked nodes in the patch to spinning locks. */
- btrfs_clear_path_blocking(path, NULL);
+ btrfs_clear_path_blocking(path, NULL, 0);
/* insert the keys of the items */
ret = setup_items_for_insert(trans, root, path, keys, data_size,
data_size = sizeof(*dir_item) + name_len + data_len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
name, name_len);
- /*
- * FIXME: at some point we should handle xattr's that are larger than
- * what we can fit in our leaf. We set location to NULL b/c we arent
- * pointing at anything else, that will change if we store the xattr
- * data in a separate inode.
- */
- BUG_ON(IS_ERR(dir_item));
+ if (IS_ERR(dir_item))
+ return PTR_ERR(dir_item);
memset(&location, 0, sizeof(location));
leaf = path->nodes[0];
struct btrfs_work work;
};
-/* These are used to set the lockdep class on the extent buffer locks.
- * The class is set by the readpage_end_io_hook after the buffer has
- * passed csum validation but before the pages are unlocked.
+/*
+ * Lockdep class keys for extent_buffer->lock's in this root. For a given
+ * eb, the lockdep key is determined by the btrfs_root it belongs to and
+ * the level the eb occupies in the tree.
+ *
+ * Different roots are used for different purposes and may nest inside each
+ * other and they require separate keysets. As lockdep keys should be
+ * static, assign keysets according to the purpose of the root as indicated
+ * by btrfs_root->objectid. This ensures that all special purpose roots
+ * have separate keysets.
*
- * The lockdep class is also set by btrfs_init_new_buffer on freshly
- * allocated blocks.
+ * Lock-nesting across peer nodes is always done with the immediate parent
+ * node locked thus preventing deadlock. As lockdep doesn't know this, use
+ * subclass to avoid triggering lockdep warning in such cases.
*
- * The class is based on the level in the tree block, which allows lockdep
- * to know that lower nodes nest inside the locks of higher nodes.
+ * The key is set by the readpage_end_io_hook after the buffer has passed
+ * csum validation but before the pages are unlocked. It is also set by
+ * btrfs_init_new_buffer on freshly allocated blocks.
*
- * We also add a check to make sure the highest level of the tree is
- * the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this
- * code needs update as well.
+ * We also add a check to make sure the highest level of the tree is the
+ * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
+ * needs update as well.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
# error
# endif
-static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
-static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
- /* leaf */
- "btrfs-extent-00",
- "btrfs-extent-01",
- "btrfs-extent-02",
- "btrfs-extent-03",
- "btrfs-extent-04",
- "btrfs-extent-05",
- "btrfs-extent-06",
- "btrfs-extent-07",
- /* highest possible level */
- "btrfs-extent-08",
+
+static struct btrfs_lockdep_keyset {
+ u64 id; /* root objectid */
+ const char *name_stem; /* lock name stem */
+ char names[BTRFS_MAX_LEVEL + 1][20];
+ struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
+} btrfs_lockdep_keysets[] = {
+ { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" },
+ { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" },
+ { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" },
+ { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" },
+ { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" },
+ { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" },
+ { .id = BTRFS_ORPHAN_OBJECTID, .name_stem = "orphan" },
+ { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
+ { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
+ { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
+ { .id = 0, .name_stem = "tree" },
};
+
+void __init btrfs_init_lockdep(void)
+{
+ int i, j;
+
+ /* initialize lockdep class names */
+ for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
+ struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
+
+ for (j = 0; j < ARRAY_SIZE(ks->names); j++)
+ snprintf(ks->names[j], sizeof(ks->names[j]),
+ "btrfs-%s-%02d", ks->name_stem, j);
+ }
+}
+
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
+ int level)
+{
+ struct btrfs_lockdep_keyset *ks;
+
+ BUG_ON(level >= ARRAY_SIZE(ks->keys));
+
+ /* find the matching keyset, id 0 is the default entry */
+ for (ks = btrfs_lockdep_keysets; ks->id; ks++)
+ if (ks->id == objectid)
+ break;
+
+ lockdep_set_class_and_name(&eb->lock,
+ &ks->keys[level], ks->names[level]);
+}
+
#endif
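For reference, a standalone sketch of the name generation done in btrfs_init_lockdep() above: each keyset gets one lock name per tree level, assuming BTRFS_MAX_LEVEL is 8 as the preprocessor guard checks.

/* Prints btrfs-extent-00 .. btrfs-extent-08 for the "extent" keyset. */
#include <stdio.h>

#define LEVELS 9	/* BTRFS_MAX_LEVEL + 1 */

int main(void)
{
	char names[LEVELS][20];
	int j;

	for (j = 0; j < LEVELS; j++) {
		snprintf(names[j], sizeof(names[j]),
			 "btrfs-%s-%02d", "extent", j);
		printf("%s\n", names[j]);
	}
	return 0;
}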
/*
unsigned long len;
unsigned long cur_len;
unsigned long offset = BTRFS_CSUM_SIZE;
- char *map_token = NULL;
char *kaddr;
unsigned long map_start;
unsigned long map_len;
len = buf->len - offset;
while (len > 0) {
err = map_private_extent_buffer(buf, offset, 32,
- &map_token, &kaddr,
- &map_start, &map_len, KM_USER0);
+ &kaddr, &map_start, &map_len);
if (err)
return 1;
cur_len = min(len, map_len - (offset - map_start));
crc, cur_len);
len -= cur_len;
offset += cur_len;
- unmap_extent_buffer(buf, map_token, KM_USER0);
}
if (csum_size > sizeof(inline_result)) {
result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
return 0;
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
-{
- lockdep_set_class_and_name(&eb->lock,
- &btrfs_eb_class[level],
- btrfs_eb_name[level]);
-}
-#endif
-
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
}
found_level = btrfs_header_level(eb);
- btrfs_set_buffer_lockdep_class(eb, found_level);
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
+ eb, found_level);
ret = csum_tree_block(root, eb, 1);
if (ret) {
goto fail_bdi;
}
- fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS;
+ mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
fs_info->thread_pool_size),
&fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->caching_workers, "cache",
+ 2, &fs_info->generic_worker);
+
/* a higher idle thresh on the submit workers makes it much more
* likely that bios will be send down in a sane order to the
* devices
btrfs_start_workers(&fs_info->endio_write_workers, 1);
btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
btrfs_start_workers(&fs_info->delayed_workers, 1);
+ btrfs_start_workers(&fs_info->caching_workers, 1);
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
btrfs_stop_workers(&fs_info->delayed_workers);
+ btrfs_stop_workers(&fs_info->caching_workers);
fail_alloc:
kfree(fs_info->delayed_root);
fail_iput:
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
btrfs_stop_workers(&fs_info->delayed_workers);
+ btrfs_stop_workers(&fs_info->caching_workers);
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
+void btrfs_init_lockdep(void);
+void btrfs_set_buffer_lockdep_class(u64 objectid,
+ struct extent_buffer *eb, int level);
#else
-static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
- int level)
+static inline void btrfs_init_lockdep(void)
+{ }
+static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
+ struct extent_buffer *eb, int level)
{
}
#endif
return total_added;
}
-static int caching_kthread(void *data)
+static noinline void caching_thread(struct btrfs_work *work)
{
- struct btrfs_block_group_cache *block_group = data;
- struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
- struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_block_group_cache *block_group;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_caching_control *caching_ctl;
+ struct btrfs_root *extent_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key;
u32 nritems;
int ret = 0;
+ caching_ctl = container_of(work, struct btrfs_caching_control, work);
+ block_group = caching_ctl->block_group;
+ fs_info = block_group->fs_info;
+ extent_root = fs_info->extent_root;
+
path = btrfs_alloc_path();
if (!path)
- return -ENOMEM;
+ goto out;
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
free_excluded_extents(extent_root, block_group);
mutex_unlock(&caching_ctl->mutex);
+out:
wake_up(&caching_ctl->wait);
put_caching_control(caching_ctl);
- atomic_dec(&block_group->space_info->caching_threads);
btrfs_put_block_group(block_group);
-
- return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_caching_control *caching_ctl;
- struct task_struct *tsk;
int ret = 0;
smp_mb();
caching_ctl->progress = cache->key.objectid;
/* one for caching kthread, one for caching block group list */
atomic_set(&caching_ctl->count, 2);
+ caching_ctl->work.func = caching_thread;
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
up_write(&fs_info->extent_commit_sem);
- atomic_inc(&cache->space_info->caching_threads);
btrfs_get_block_group(cache);
- tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
- cache->key.objectid);
- if (IS_ERR(tsk)) {
- ret = PTR_ERR(tsk);
- printk(KERN_ERR "error running thread %d\n", ret);
- BUG();
- }
+ btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
return ret;
}
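The conversion above replaces the per-block-group caching kthread with a btrfs_work item embedded in the caching control, which the worker recovers via container_of(). A small userspace sketch of that embedded-work pattern follows; the types are models only, not the btrfs structures.

/* The work item lives inside the caching control, so the worker recovers
 * its context from the work pointer alone instead of a separate data arg.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
	void (*func)(struct work *w);
};

struct caching_ctl_model {
	const char *block_group;
	struct work work;		/* embedded, as in btrfs_caching_control */
};

static void caching_fn(struct work *w)
{
	struct caching_ctl_model *ctl =
		container_of(w, struct caching_ctl_model, work);

	printf("caching block group %s\n", ctl->block_group);
}

int main(void)
{
	struct caching_ctl_model ctl = { .block_group = "bg-1048576" };

	ctl.work.func = caching_fn;
	ctl.work.func(&ctl.work);	/* a real worker pool would make this call */
	return 0;
}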
found->full = 0;
found->force_alloc = CHUNK_ALLOC_NO_FORCE;
found->chunk_alloc = 0;
+ found->flush = 0;
+ init_waitqueue_head(&found->wait);
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
- atomic_set(&found->caching_threads, 0);
return 0;
}
if (reserved == 0)
return 0;
+ smp_mb();
+ if (root->fs_info->delalloc_bytes == 0) {
+ if (trans)
+ return 0;
+ btrfs_wait_ordered_extents(root, 0, 0);
+ return 0;
+ }
+
max_reclaim = min(reserved, to_reclaim);
while (loops < 1024) {
}
}
+ if (reclaimed >= to_reclaim && !trans)
+ btrfs_wait_ordered_extents(root, 0, 0);
return reclaimed >= to_reclaim;
}
u64 num_bytes = orig_bytes;
int retries = 0;
int ret = 0;
- bool reserved = false;
bool committed = false;
+ bool flushing = false;
again:
- ret = -ENOSPC;
- if (reserved)
- num_bytes = 0;
-
+ ret = 0;
spin_lock(&space_info->lock);
+ /*
+ * We only want to wait if somebody other than us is flushing and we are
+ * actually allowed to flush.
+ */
+ while (flush && !flushing && space_info->flush) {
+ spin_unlock(&space_info->lock);
+ /*
+ * If we have a trans handle we can't wait because the flusher
+ * may have to commit the transaction, which would mean we would
+ * deadlock since we are waiting for the flusher to finish, but
+ * hold the current transaction open.
+ */
+ if (trans)
+ return -EAGAIN;
+ ret = wait_event_interruptible(space_info->wait,
+ !space_info->flush);
+ /* Must have been interrupted, return */
+ if (ret)
+ return -EINTR;
+
+ spin_lock(&space_info->lock);
+ }
+
+ ret = -ENOSPC;
unused = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
space_info->bytes_may_use;
if (unused <= space_info->total_bytes) {
unused = space_info->total_bytes - unused;
if (unused >= num_bytes) {
- if (!reserved)
- space_info->bytes_reserved += orig_bytes;
+ space_info->bytes_reserved += orig_bytes;
ret = 0;
} else {
/*
* to reclaim space we can actually use it instead of somebody else
* stealing it from us.
*/
- if (ret && !reserved) {
- space_info->bytes_reserved += orig_bytes;
- reserved = true;
+ if (ret && flush) {
+ flushing = true;
+ space_info->flush = 1;
}
spin_unlock(&space_info->lock);
- if (!ret)
- return 0;
-
- if (!flush)
+ if (!ret || !flush)
goto out;
/*
* metadata until after the IO is completed.
*/
ret = shrink_delalloc(trans, root, num_bytes, 1);
- if (ret > 0)
- return 0;
- else if (ret < 0)
+ if (ret < 0)
goto out;
+ ret = 0;
+
/*
* So if we were overcommitted it's possible that somebody else flushed
* out enough space and we simply didn't have enough space to reclaim,
goto again;
}
- spin_lock(&space_info->lock);
/*
* Not enough space to be reclaimed, don't bother committing the
* transaction.
*/
+ spin_lock(&space_info->lock);
if (space_info->bytes_pinned < orig_bytes)
ret = -ENOSPC;
spin_unlock(&space_info->lock);
goto out;
ret = -EAGAIN;
- if (trans || committed)
+ if (trans)
goto out;
ret = -ENOSPC;
+ if (committed)
+ goto out;
+
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
goto out;
}
out:
- if (reserved) {
+ if (flushing) {
spin_lock(&space_info->lock);
- space_info->bytes_reserved -= orig_bytes;
+ space_info->flush = 0;
+ wake_up_all(&space_info->wait);
spin_unlock(&space_info->lock);
}
-
return ret;
}
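The flush/flushing logic above turns reserve_metadata_bytes into a gate: at most one task flushes a space_info at a time, other would-be flushers sleep on space_info->wait, and a task holding a transaction handle bails out with -EAGAIN instead of waiting (the flusher may need to commit that very transaction). A rough pthread model of just that gate, assuming nothing about the actual space accounting:

#include <pthread.h>
#include <stdbool.h>

struct space_info_model {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	bool flush;			/* set while somebody is flushing */
};

/* returns false when the caller holds a transaction and must not wait */
static bool become_flusher(struct space_info_model *s, bool have_trans)
{
	pthread_mutex_lock(&s->lock);
	while (s->flush) {
		if (have_trans) {
			/* the flusher may need to commit our transaction */
			pthread_mutex_unlock(&s->lock);
			return false;	/* caller returns -EAGAIN */
		}
		pthread_cond_wait(&s->wait, &s->lock);
	}
	s->flush = true;		/* we are now the flusher */
	pthread_mutex_unlock(&s->lock);
	return true;
}

static void finish_flush(struct space_info_model *s)
{
	pthread_mutex_lock(&s->lock);
	s->flush = false;
	pthread_cond_broadcast(&s->wait);	/* wake_up_all() equivalent */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct space_info_model s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	if (become_flusher(&s, false))
		finish_flush(&s);
	return 0;
}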
if (commit_trans) {
if (trans)
return -EAGAIN;
-
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
ret = btrfs_commit_transaction(trans, root);
return 0;
}
-int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- int num_items)
-{
- u64 num_bytes;
- int ret;
-
- if (num_items == 0 || root->fs_info->chunk_root == root)
- return 0;
-
- num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
- ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
- num_bytes);
- if (!ret) {
- trans->bytes_reserved += num_bytes;
- trans->block_rsv = &root->fs_info->trans_block_rsv;
- }
- return ret;
-}
-
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
+static unsigned drop_outstanding_extent(struct inode *inode)
+{
+ unsigned dropped_extents = 0;
+
+ spin_lock(&BTRFS_I(inode)->lock);
+ BUG_ON(!BTRFS_I(inode)->outstanding_extents);
+ BTRFS_I(inode)->outstanding_extents--;
+
+ /*
+ * If we have at least as many outstanding extents as we have
+ * reserved, we need to leave the reserved extents count alone.
+ */
+ if (BTRFS_I(inode)->outstanding_extents >=
+ BTRFS_I(inode)->reserved_extents)
+ goto out;
+
+ dropped_extents = BTRFS_I(inode)->reserved_extents -
+ BTRFS_I(inode)->outstanding_extents;
+ BTRFS_I(inode)->reserved_extents -= dropped_extents;
+out:
+ spin_unlock(&BTRFS_I(inode)->lock);
+ return dropped_extents;
+}
+
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
{
return num_bytes >>= 3;
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
- u64 to_reserve;
- int nr_extents;
- int reserved_extents;
+ u64 to_reserve = 0;
+ unsigned nr_extents = 0;
int ret;
if (btrfs_transaction_in_commit(root->fs_info))
num_bytes = ALIGN(num_bytes, root->sectorsize);
- nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
- reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+
+ if (BTRFS_I(inode)->outstanding_extents >
+ BTRFS_I(inode)->reserved_extents) {
+ nr_extents = BTRFS_I(inode)->outstanding_extents -
+ BTRFS_I(inode)->reserved_extents;
+ BTRFS_I(inode)->reserved_extents += nr_extents;
- if (nr_extents > reserved_extents) {
- nr_extents -= reserved_extents;
to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
- } else {
- nr_extents = 0;
- to_reserve = 0;
}
+ spin_unlock(&BTRFS_I(inode)->lock);
to_reserve += calc_csum_metadata_size(inode, num_bytes);
ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
- if (ret)
+ if (ret) {
+ unsigned dropped;
+ /*
+ * We don't need the return value since our reservation failed,
+ * we just need to clean up our counter.
+ */
+ dropped = drop_outstanding_extent(inode);
+ WARN_ON(dropped > 1);
return ret;
-
- atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
- atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+ }
block_rsv_add_bytes(block_rsv, to_reserve, 1);
- if (block_rsv->size > 512 * 1024 * 1024)
- shrink_delalloc(NULL, root, to_reserve, 0);
-
return 0;
}
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 to_free;
- int nr_extents;
- int reserved_extents;
+ u64 to_free = 0;
+ unsigned dropped;
num_bytes = ALIGN(num_bytes, root->sectorsize);
- atomic_dec(&BTRFS_I(inode)->outstanding_extents);
- WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
-
- reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
- do {
- int old, new;
-
- nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
- if (nr_extents >= reserved_extents) {
- nr_extents = 0;
- break;
- }
- old = reserved_extents;
- nr_extents = reserved_extents - nr_extents;
- new = reserved_extents - nr_extents;
- old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
- reserved_extents, new);
- if (likely(old == reserved_extents))
- break;
- reserved_extents = old;
- } while (1);
+ dropped = drop_outstanding_extent(inode);
to_free = calc_csum_metadata_size(inode, num_bytes);
- if (nr_extents > 0)
- to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
+ if (dropped > 0)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
}
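drop_outstanding_extent() and the reserve/release paths above keep two per-inode counters in step: outstanding_extents (how many delalloc extents currently exist) and reserved_extents (how many have metadata reserved). A small userspace model of that accounting, with hypothetical field names rather than the kernel structures:

/* reserved_extents only shrinks once it exceeds the outstanding count, and
 * the caller learns how many reservations it may release.
 */
#include <assert.h>
#include <stdio.h>

struct inode_model {
	unsigned int outstanding_extents;
	unsigned int reserved_extents;
};

static unsigned int drop_outstanding(struct inode_model *i)
{
	unsigned int dropped = 0;

	assert(i->outstanding_extents);
	i->outstanding_extents--;

	/* keep the reservation while outstanding extents still cover it */
	if (i->outstanding_extents >= i->reserved_extents)
		return 0;

	dropped = i->reserved_extents - i->outstanding_extents;
	i->reserved_extents -= dropped;
	return dropped;
}

int main(void)
{
	struct inode_model m = { .outstanding_extents = 3, .reserved_extents = 2 };

	printf("dropped %u\n", drop_outstanding(&m));	/* 0: still 2 outstanding */
	printf("dropped %u\n", drop_outstanding(&m));	/* 1: only 1 outstanding left */
	return 0;
}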
/*
- * We only want to start kthread caching if we are at
- * the point where we will wait for caching to make
- * progress, or if our ideal search is over and we've
- * found somebody to start caching.
+ * The caching workers are limited to 2 threads, so we
+ * can queue as much work as we care to.
*/
- if (loop > LOOP_CACHING_NOWAIT ||
- (loop > LOOP_FIND_IDEAL &&
- atomic_read(&space_info->caching_threads) < 2)) {
+ if (loop > LOOP_FIND_IDEAL) {
ret = cache_block_group(block_group, trans,
orig_root, 0);
BUG_ON(ret);
if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
found_uncached_bg = false;
loop++;
- if (!ideal_cache_percent &&
- atomic_read(&space_info->caching_threads))
+ if (!ideal_cache_percent)
goto search;
/*
if (!buf)
return ERR_PTR(-ENOMEM);
btrfs_set_header_generation(buf, trans->transid);
- btrfs_set_buffer_lockdep_class(buf, level);
+ btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
clean_tree_block(trans, root, buf);
return 1;
if (path->locks[level] && !wc->keep_locks) {
- btrfs_tree_unlock(eb);
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
}
return 0;
* keep the tree lock
*/
if (path->locks[level] && level > 0) {
- btrfs_tree_unlock(eb);
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
}
return 0;
BUG_ON(level != btrfs_header_level(next));
path->nodes[level] = next;
path->slots[level] = 0;
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
wc->level = level;
if (wc->level == 1)
wc->reada_slot = 0;
BUG_ON(level == 0);
btrfs_tree_lock(eb);
btrfs_set_lock_blocking(eb);
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
ret = btrfs_lookup_extent_info(trans, root,
eb->start, eb->len,
BUG_ON(ret);
BUG_ON(wc->refs[level] == 0);
if (wc->refs[level] == 1) {
- btrfs_tree_unlock(eb);
- path->locks[level] = 0;
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
return 1;
}
}
btrfs_header_generation(eb) == trans->transid) {
btrfs_tree_lock(eb);
btrfs_set_lock_blocking(eb);
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
}
clean_tree_block(trans, root, eb);
}
return 0;
if (path->locks[level]) {
- btrfs_tree_unlock(path->nodes[level]);
+ btrfs_tree_unlock_rw(path->nodes[level],
+ path->locks[level]);
path->locks[level] = 0;
}
free_extent_buffer(path->nodes[level]);
path->nodes[level] = btrfs_lock_root_node(root);
btrfs_set_lock_blocking(path->nodes[level]);
path->slots[level] = 0;
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
memset(&wc->update_progress, 0,
sizeof(wc->update_progress));
} else {
level = btrfs_header_level(node);
path->nodes[level] = node;
path->slots[level] = 0;
- path->locks[level] = 1;
+ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
wc->refs[parent_level] = 1;
wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
return flags;
}
-static int set_block_group_ro(struct btrfs_block_group_cache *cache)
+static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
+ u64 min_allocable_bytes;
int ret = -ENOSPC;
if (cache->ro)
return 0;
+ /*
+ * We need some metadata space and system metadata space for
+ * allocating chunks in some corner cases until we force to set
+ * it to be readonly.
+ */
+ if ((sinfo->flags &
+ (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
+ !force)
+ min_allocable_bytes = 1 * 1024 * 1024;
+ else
+ min_allocable_bytes = 0;
+
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
num_bytes = cache->key.offset - cache->reserved - cache->pinned -
if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
sinfo->bytes_may_use + sinfo->bytes_readonly +
- cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
+ cache->reserved_pinned + num_bytes + min_allocable_bytes <=
+ sinfo->total_bytes) {
sinfo->bytes_readonly += num_bytes;
sinfo->bytes_reserved += cache->reserved_pinned;
cache->reserved_pinned = 0;
do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
CHUNK_ALLOC_FORCE);
- ret = set_block_group_ro(cache);
+ ret = set_block_group_ro(cache, 0);
if (!ret)
goto out;
alloc_flags = get_alloc_profile(root, cache->space_info->flags);
CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
- ret = set_block_group_ro(cache);
+ ret = set_block_group_ro(cache, 0);
out:
btrfs_end_transaction(trans, root);
return ret;
set_avail_alloc_bits(root->fs_info, cache->flags);
if (btrfs_chunk_readonly(root, cache->key.objectid))
- set_block_group_ro(cache);
+ set_block_group_ro(cache, 1);
}
list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
* mirrored block groups.
*/
list_for_each_entry(cache, &space_info->block_groups[3], list)
- set_block_group_ro(cache);
+ set_block_group_ro(cache, 1);
list_for_each_entry(cache, &space_info->block_groups[4], list)
- set_block_group_ro(cache);
+ set_block_group_ro(cache, 1);
}
init_global_block_rsv(info);
if (other->start == state->end + 1 &&
other->state == state->state) {
merge_cb(tree, state, other);
- other->start = state->start;
- state->tree = NULL;
- rb_erase(&state->rb_node, &tree->state);
- free_extent_state(state);
- state = NULL;
+ state->end = other->end;
+ other->tree = NULL;
+ rb_erase(&other->rb_node, &tree->state);
+ free_extent_state(other);
}
}
"%llu %llu\n", (unsigned long long)found->start,
(unsigned long long)found->end,
(unsigned long long)start, (unsigned long long)end);
- free_extent_state(state);
return -EEXIST;
}
state->tree = tree;
cached_state = NULL;
}
- if (cached && cached->tree && cached->start == start) {
+ if (cached && cached->tree && cached->start <= start &&
+ cached->end > start) {
if (clear)
atomic_dec(&cached->refs);
state = cached;
spin_lock(&tree->lock);
if (cached_state && *cached_state) {
state = *cached_state;
- if (state->start == start && state->tree) {
+ if (state->start <= start && state->end > start &&
+ state->tree) {
node = &state->rb_node;
goto hit_next;
}
if (err)
goto out;
- next_node = rb_next(node);
cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
+ next_node = rb_next(&state->rb_node);
if (next_node && start < end && prealloc && !need_resched()) {
state = rb_entry(next_node, struct extent_state,
rb_node);
* Avoid to free 'prealloc' if it can be merged with
* the later extent.
*/
- atomic_inc(&prealloc->refs);
err = insert_state(tree, prealloc, start, this_end,
&bits);
BUG_ON(err == -EEXIST);
goto out;
}
cache_state(prealloc, cached_state);
- free_extent_state(prealloc);
prealloc = NULL;
start = this_end + 1;
goto search_again;
int bitset = 0;
spin_lock(&tree->lock);
- if (cached && cached->tree && cached->start == start)
+ if (cached && cached->tree && cached->start <= start &&
+ cached->end > start)
node = &cached->rb_node;
else
node = tree_search(tree, start);
pgoff_t index;
pgoff_t end; /* Inclusive */
int scanned = 0;
+ int tag;
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
end = wbc->range_end >> PAGE_CACHE_SHIFT;
scanned = 1;
}
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ tag = PAGECACHE_TAG_TOWRITE;
+ else
+ tag = PAGECACHE_TAG_DIRTY;
retry:
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ tag_pages_for_writeback(mapping, index, end);
while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY, min(end - index,
- (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+ (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
unsigned i;
scanned = 1;
return NULL;
eb->start = start;
eb->len = len;
- spin_lock_init(&eb->lock);
- init_waitqueue_head(&eb->lock_wq);
+ rwlock_init(&eb->lock);
+ atomic_set(&eb->write_locks, 0);
+ atomic_set(&eb->read_locks, 0);
+ atomic_set(&eb->blocking_readers, 0);
+ atomic_set(&eb->blocking_writers, 0);
+ atomic_set(&eb->spinning_readers, 0);
+ atomic_set(&eb->spinning_writers, 0);
+ init_waitqueue_head(&eb->write_lock_wq);
+ init_waitqueue_head(&eb->read_lock_wq);
#if LEAK_DEBUG
spin_lock_irqsave(&leak_lock, flags);
i = 0;
}
for (; i < num_pages; i++, index++) {
- p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
+ p = find_or_create_page(mapping, index, GFP_NOFS);
if (!p) {
WARN_ON(1);
goto free_eb;
return was_dirty;
}
+static int __eb_straddles_pages(u64 start, u64 len)
+{
+ if (len < PAGE_CACHE_SIZE)
+ return 1;
+ if (start & (PAGE_CACHE_SIZE - 1))
+ return 1;
+ if ((start + len) & (PAGE_CACHE_SIZE - 1))
+ return 1;
+ return 0;
+}
+
+static int eb_straddles_pages(struct extent_buffer *eb)
+{
+ return __eb_straddles_pages(eb->start, eb->len);
+}
+
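__eb_straddles_pages() above lets the uptodate tracking skip the extent_io tree bits for buffers that exactly cover whole pages. A tiny standalone check of the same predicate, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL	/* assumption: 4 KiB pages */

static int straddles_pages(unsigned long long start, unsigned long long len)
{
	if (len < PAGE_CACHE_SIZE)
		return 1;
	if (start & (PAGE_CACHE_SIZE - 1))
		return 1;
	if ((start + len) & (PAGE_CACHE_SIZE - 1))
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", straddles_pages(0x1000, 0x1000));	/* 0: aligned, page-sized */
	printf("%d\n", straddles_pages(0x1000, 0x800));		/* 1: shorter than a page */
	printf("%d\n", straddles_pages(0x1800, 0x1000));	/* 1: unaligned start/end */
	return 0;
}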
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb,
struct extent_state **cached_state)
num_pages = num_extent_pages(eb->start, eb->len);
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- cached_state, GFP_NOFS);
+ if (eb_straddles_pages(eb)) {
+ clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+ cached_state, GFP_NOFS);
+ }
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if (page)
num_pages = num_extent_pages(eb->start, eb->len);
- set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- NULL, GFP_NOFS);
+ if (eb_straddles_pages(eb)) {
+ set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+ NULL, GFP_NOFS);
+ }
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
int uptodate;
unsigned long index;
- ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
- if (ret)
- return 1;
+ if (__eb_straddles_pages(start, end - start + 1)) {
+ ret = test_range_bit(tree, start, end,
+ EXTENT_UPTODATE, 1, NULL);
+ if (ret)
+ return 1;
+ }
while (start <= end) {
index = start >> PAGE_CACHE_SHIFT;
page = find_get_page(tree->mapping, index);
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 1;
- ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1, cached_state);
- if (ret)
- return ret;
+ if (eb_straddles_pages(eb)) {
+ ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+ EXTENT_UPTODATE, 1, cached_state);
+ if (ret)
+ return ret;
+ }
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
- if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1, NULL)) {
- return 0;
+ if (eb_straddles_pages(eb)) {
+ if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+ EXTENT_UPTODATE, 1, NULL)) {
+ return 0;
+ }
}
if (start) {
page = extent_buffer_page(eb, i);
cur = min(len, (PAGE_CACHE_SIZE - offset));
- kaddr = kmap_atomic(page, KM_USER1);
+ kaddr = page_address(page);
memcpy(dst, kaddr + offset, cur);
- kunmap_atomic(kaddr, KM_USER1);
dst += cur;
len -= cur;
}
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
- unsigned long min_len, char **token, char **map,
+ unsigned long min_len, char **map,
unsigned long *map_start,
- unsigned long *map_len, int km)
+ unsigned long *map_len)
{
size_t offset = start & (PAGE_CACHE_SIZE - 1);
char *kaddr;
}
p = extent_buffer_page(eb, i);
- kaddr = kmap_atomic(p, km);
- *token = kaddr;
+ kaddr = page_address(p);
*map = kaddr + offset;
*map_len = PAGE_CACHE_SIZE - offset;
return 0;
}
-int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
- unsigned long min_len,
- char **token, char **map,
- unsigned long *map_start,
- unsigned long *map_len, int km)
-{
- int err;
- int save = 0;
- if (eb->map_token) {
- unmap_extent_buffer(eb, eb->map_token, km);
- eb->map_token = NULL;
- save = 1;
- }
- err = map_private_extent_buffer(eb, start, min_len, token, map,
- map_start, map_len, km);
- if (!err && save) {
- eb->map_token = *token;
- eb->kaddr = *map;
- eb->map_start = *map_start;
- eb->map_len = *map_len;
- }
- return err;
-}
-
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
-{
- kunmap_atomic(token, km);
-}
-
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
unsigned long start,
unsigned long len)
cur = min(len, (PAGE_CACHE_SIZE - offset));
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = page_address(page);
ret = memcmp(ptr, kaddr + offset, cur);
- kunmap_atomic(kaddr, KM_USER0);
if (ret)
break;
WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
- kaddr = kmap_atomic(page, KM_USER1);
+ kaddr = page_address(page);
memcpy(kaddr + offset, src, cur);
- kunmap_atomic(kaddr, KM_USER1);
src += cur;
len -= cur;
WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = page_address(page);
memset(kaddr + offset, c, cur);
- kunmap_atomic(kaddr, KM_USER0);
len -= cur;
offset = 0;
cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = page_address(page);
read_extent_buffer(src, kaddr + offset, src_offset, cur);
- kunmap_atomic(kaddr, KM_USER0);
src_offset += cur;
len -= cur;
unsigned long dst_off, unsigned long src_off,
unsigned long len)
{
- char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+ char *dst_kaddr = page_address(dst_page);
if (dst_page == src_page) {
memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
} else {
- char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+ char *src_kaddr = page_address(src_page);
char *p = dst_kaddr + dst_off + len;
char *s = src_kaddr + src_off + len;
while (len--)
*--p = *--s;
-
- kunmap_atomic(src_kaddr, KM_USER1);
}
- kunmap_atomic(dst_kaddr, KM_USER0);
}
static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
unsigned long dst_off, unsigned long src_off,
unsigned long len)
{
- char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+ char *dst_kaddr = page_address(dst_page);
char *src_kaddr;
if (dst_page != src_page) {
- src_kaddr = kmap_atomic(src_page, KM_USER1);
+ src_kaddr = page_address(src_page);
} else {
src_kaddr = dst_kaddr;
BUG_ON(areas_overlap(src_off, dst_off, len));
}
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
- kunmap_atomic(dst_kaddr, KM_USER0);
- if (dst_page != src_page)
- kunmap_atomic(src_kaddr, KM_USER1);
}
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
struct extent_buffer {
u64 start;
unsigned long len;
- char *map_token;
- char *kaddr;
unsigned long map_start;
unsigned long map_len;
struct page *first_page;
struct rcu_head rcu_head;
atomic_t refs;
- /* the spinlock is used to protect most operations */
- spinlock_t lock;
+ /* counts of write and read lock holders on the extent buffer */
+ atomic_t write_locks;
+ atomic_t read_locks;
+ atomic_t blocking_writers;
+ atomic_t blocking_readers;
+ atomic_t spinning_readers;
+ atomic_t spinning_writers;
+
+ /* protects write locks */
+ rwlock_t lock;
- /*
- * when we keep the lock held while blocking, waiters go onto
- * the wq
+ /* readers use write_lock_wq while they wait for the write
+ * lock holders to unlock
*/
- wait_queue_head_t lock_wq;
+ wait_queue_head_t write_lock_wq;
+
+ /* writers use read_lock_wq while they wait for readers
+ * to unlock
+ */
+ wait_queue_head_t read_lock_wq;
};
static inline void extent_set_compress_type(unsigned long *bio_flags,
int extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb,
struct extent_state *cached_state);
-int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
- unsigned long min_len, char **token, char **map,
- unsigned long *map_start,
- unsigned long *map_len, int km);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
- unsigned long min_len, char **token, char **map,
+ unsigned long min_len, char **map,
unsigned long *map_start,
- unsigned long *map_len, int km);
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
+ unsigned long *map_len);
int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode,
WARN_ON(bio->bi_vcnt <= 0);
+ /*
+ * the free space stuff is only read when it hasn't been
+ * updated in the current transaction. So, we can safely
+ * read from the commit root and sidestep a nasty deadlock
+ * between reading the free space cache and updating the csum tree.
+ */
+ if (btrfs_is_free_space_inode(root, inode))
+ path->search_commit_root = 1;
+
disk_bytenr = (u64)bio->bi_sector << 9;
if (dio)
offset = logical_offset;
struct btrfs_sector_sum *sector_sum;
u32 nritems;
u32 ins_size;
- char *eb_map;
- char *eb_token;
- unsigned long map_len;
- unsigned long map_start;
u16 csum_size =
btrfs_super_csum_size(&root->fs_info->super_copy);
item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
btrfs_item_size_nr(leaf, path->slots[0]));
- eb_token = NULL;
next_sector:
- if (!eb_token ||
- (unsigned long)item + csum_size >= map_start + map_len) {
- int err;
-
- if (eb_token)
- unmap_extent_buffer(leaf, eb_token, KM_USER1);
- eb_token = NULL;
- err = map_private_extent_buffer(leaf, (unsigned long)item,
- csum_size,
- &eb_token, &eb_map,
- &map_start, &map_len, KM_USER1);
- if (err)
- eb_token = NULL;
- }
- if (eb_token) {
- memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)),
- &sector_sum->sum, csum_size);
- } else {
- write_extent_buffer(leaf, &sector_sum->sum,
- (unsigned long)item, csum_size);
- }
+ write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size);
total_bytes += root->sectorsize;
sector_sum++;
goto next_sector;
}
}
- if (eb_token) {
- unmap_extent_buffer(leaf, eb_token, KM_USER1);
- eb_token = NULL;
- }
+
btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
again:
for (i = 0; i < num_pages; i++) {
- pages[i] = grab_cache_page(inode->i_mapping, index + i);
+ pages[i] = find_or_create_page(inode->i_mapping, index + i,
+ GFP_NOFS);
if (!pages[i]) {
faili = i - 1;
err = -ENOMEM;
* managed to copy.
*/
if (num_pages > dirty_pages) {
- if (copied > 0)
- atomic_inc(
- &BTRFS_I(inode)->outstanding_extents);
+ if (copied > 0) {
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ }
btrfs_delalloc_release_space(inode,
(num_pages - dirty_pages) <<
PAGE_CACHE_SHIFT);
return inode;
spin_lock(&block_group->lock);
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
+ printk(KERN_INFO "Old style space inode found, converting.\n");
+ BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
+ block_group->disk_cache_state = BTRFS_DC_CLEAR;
+ }
+
if (!btrfs_fs_closing(root->fs_info)) {
block_group->inode = igrab(inode);
block_group->iref = 1;
btrfs_set_inode_gid(leaf, inode_item, 0);
btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
- BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
+ BTRFS_INODE_PREALLOC);
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct page *page;
- u32 *checksums = NULL, *crc;
- char *disk_crcs = NULL;
struct btrfs_key key;
struct list_head bitmaps;
u64 num_entries;
u64 num_bitmaps;
u64 generation;
- u32 cur_crc = ~(u32)0;
pgoff_t index = 0;
- unsigned long first_page_offset;
- int num_checksums;
int ret = 0;
INIT_LIST_HEAD(&bitmaps);
if (!num_entries)
goto out;
- /* Setup everything for doing checksumming */
- num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
- checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
- if (!checksums)
- goto out;
- first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
- disk_crcs = kzalloc(first_page_offset, GFP_NOFS);
- if (!disk_crcs)
- goto out;
-
ret = readahead_cache(inode);
if (ret)
goto out;
struct btrfs_free_space *e;
void *addr;
unsigned long offset = 0;
- unsigned long start_offset = 0;
int need_loop = 0;
if (!num_entries && !num_bitmaps)
break;
- if (index == 0) {
- start_offset = first_page_offset;
- offset = start_offset;
- }
-
- page = grab_cache_page(inode->i_mapping, index);
+ page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page)
goto free_cache;
if (index == 0) {
u64 *gen;
- memcpy(disk_crcs, addr, first_page_offset);
- gen = addr + (sizeof(u32) * num_checksums);
+ /*
+ * We put a bogus crc in the front of the first page in
+ * case old kernels try to mount a fs with the new
+ * format to make sure they discard the cache.
+ */
+ addr += sizeof(u64);
+ offset += sizeof(u64);
+
+ gen = addr;
if (*gen != BTRFS_I(inode)->generation) {
printk(KERN_ERR "btrfs: space cache generation"
" (%llu) does not match inode (%llu)\n",
page_cache_release(page);
goto free_cache;
}
- crc = (u32 *)disk_crcs;
- }
- entry = addr + start_offset;
-
- /* First lets check our crc before we do anything fun */
- cur_crc = ~(u32)0;
- cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc,
- PAGE_CACHE_SIZE - start_offset);
- btrfs_csum_final(cur_crc, (char *)&cur_crc);
- if (cur_crc != *crc) {
- printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
- index);
- kunmap(page);
- unlock_page(page);
- page_cache_release(page);
- goto free_cache;
+ addr += sizeof(u64);
+ offset += sizeof(u64);
}
- crc++;
+ entry = addr;
while (1) {
if (!num_entries)
ret = 1;
out:
- kfree(checksums);
- kfree(disk_crcs);
return ret;
free_cache:
__btrfs_remove_free_space_cache(ctl);
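Taken together, the reader above implies the following first-page layout for the new free-space-cache format. This is an assumption read out of this hunk for illustration, not an authoritative on-disk definition:

#include <stdint.h>

struct free_space_cache_first_page_model {
	uint64_t bogus_crc_slot;	/* holds a deliberately wrong crc so old
					 * kernels reject and discard the cache */
	uint64_t generation;		/* must match the inode's generation */
	/* free space entries and bitmaps follow from here */
};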
struct btrfs_key key;
u64 start, end, len;
u64 bytes = 0;
- u32 *crc, *checksums;
- unsigned long first_page_offset;
+ u32 crc = ~(u32)0;
int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
- /* Since the first page has all of our checksums and our generation we
- * need to calculate the offset into the page that we can start writing
- * our entries.
- */
- first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
-
filemap_write_and_wait(inode->i_mapping);
btrfs_wait_ordered_range(inode, inode->i_size &
~(root->sectorsize - 1), (u64)-1);
- /* make sure we don't overflow that first page */
- if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
- /* this is really the same as running out of space, where we also return 0 */
- printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
- ret = 0;
- goto out_update;
- }
-
- /* We need a checksum per page. */
- crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
- if (!crc)
- return -1;
-
pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
- if (!pages) {
- kfree(crc);
+ if (!pages)
return -1;
- }
/* Get the cluster for this block_group if it exists */
if (block_group && !list_empty(&block_group->cluster_list))
* know and don't freak out.
*/
while (index < num_pages) {
- page = grab_cache_page(inode->i_mapping, index);
+ page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page) {
int i;
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
- goto out_free;
+ goto out;
}
pages[index] = page;
index++;
/* Write out the extent entries */
do {
struct btrfs_free_space_entry *entry;
- void *addr;
+ void *addr, *orig;
unsigned long offset = 0;
- unsigned long start_offset = 0;
next_page = false;
- if (index == 0) {
- start_offset = first_page_offset;
- offset = start_offset;
- }
-
if (index >= num_pages) {
out_of_space = true;
break;
page = pages[index];
- addr = kmap(page);
- entry = addr + start_offset;
+ orig = addr = kmap(page);
+ if (index == 0) {
+ u64 *gen;
- memset(addr, 0, PAGE_CACHE_SIZE);
+ /*
+ * We're going to put in a bogus crc for this page to
+ * make sure that old kernels that aren't aware of this
+ * format will be sure to discard the cache.
+ */
+ addr += sizeof(u64);
+ offset += sizeof(u64);
+
+ gen = addr;
+ *gen = trans->transid;
+ addr += sizeof(u64);
+ offset += sizeof(u64);
+ }
+ entry = addr;
+
+ memset(addr, 0, PAGE_CACHE_SIZE - offset);
while (node && !next_page) {
struct btrfs_free_space *e;
next_page = true;
entry++;
}
- *crc = ~(u32)0;
- *crc = btrfs_csum_data(root, addr + start_offset, *crc,
- PAGE_CACHE_SIZE - start_offset);
- kunmap(page);
- btrfs_csum_final(*crc, (char *)crc);
- crc++;
+ /* Generate bogus crc value */
+ if (index == 0) {
+ u32 *tmp;
+ crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
+ PAGE_CACHE_SIZE - sizeof(u64));
+ btrfs_csum_final(crc, (char *)&crc);
+ crc++;
+ tmp = orig;
+ *tmp = crc;
+ }
+
+ kunmap(page);
bytes += PAGE_CACHE_SIZE;
addr = kmap(page);
memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
- *crc = ~(u32)0;
- *crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE);
kunmap(page);
- btrfs_csum_final(*crc, (char *)crc);
- crc++;
bytes += PAGE_CACHE_SIZE;
list_del_init(&entry->list);
i_size_read(inode) - 1, &cached_state,
GFP_NOFS);
ret = 0;
- goto out_free;
+ goto out;
}
/* Zero out the rest of the pages just to make sure */
index++;
}
- /* Write the checksums and trans id to the first page */
- {
- void *addr;
- u64 *gen;
-
- page = pages[0];
-
- addr = kmap(page);
- memcpy(addr, checksums, sizeof(u32) * num_pages);
- gen = addr + (sizeof(u32) * num_pages);
- *gen = trans->transid;
- kunmap(page);
- }
-
ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
bytes, &cached_state);
btrfs_drop_pages(pages, num_pages);
if (ret) {
ret = 0;
- goto out_free;
+ goto out;
}
BTRFS_I(inode)->generation = trans->transid;
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
- goto out_free;
+ goto out;
}
leaf = path->nodes[0];
if (ret > 0) {
EXTENT_DO_ACCOUNTING, 0, 0, NULL,
GFP_NOFS);
btrfs_release_path(path);
- goto out_free;
+ goto out;
}
}
header = btrfs_item_ptr(leaf, path->slots[0],
ret = 1;
-out_free:
- kfree(checksums);
+out:
kfree(pages);
-
-out_update:
if (ret != 1) {
invalidate_inode_pages2_range(inode->i_mapping, 0, index);
BTRFS_I(inode)->generation = 0;
return alloc_hint;
}
-static inline bool is_free_space_inode(struct btrfs_root *root,
- struct inode *inode)
-{
- if (root == root->fs_info->tree_root ||
- BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
- return true;
- return false;
-}
-
/*
* when extent_io.c finds a delayed allocation range in the file,
* the call backs end up in this code. The basic idea is to
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
- BUG_ON(is_free_space_inode(root, inode));
+ BUG_ON(btrfs_is_free_space_inode(root, inode));
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (!path)
return -ENOMEM;
- nolock = is_free_space_inode(root, inode);
+ nolock = btrfs_is_free_space_inode(root, inode);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
if (!(orig->state & EXTENT_DELALLOC))
return 0;
- atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
return 0;
}
if (!(other->state & EXTENT_DELALLOC))
return 0;
- atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents--;
+ spin_unlock(&BTRFS_I(inode)->lock);
return 0;
}
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- bool do_list = !is_free_space_inode(root, inode);
+ bool do_list = !btrfs_is_free_space_inode(root, inode);
- if (*bits & EXTENT_FIRST_DELALLOC)
+ if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
- else
- atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+ } else {
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ }
spin_lock(&root->fs_info->delalloc_lock);
BTRFS_I(inode)->delalloc_bytes += len;
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- bool do_list = !is_free_space_inode(root, inode);
+ bool do_list = !btrfs_is_free_space_inode(root, inode);
- if (*bits & EXTENT_FIRST_DELALLOC)
+ if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
- else if (!(*bits & EXTENT_DO_ACCOUNTING))
- atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+ } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents--;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ }
if (*bits & EXTENT_DO_ACCOUNTING)
btrfs_delalloc_release_metadata(inode, len);
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- if (is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(root, inode))
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
else
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
return 0;
BUG_ON(!ordered_extent);
- nolock = is_free_space_inode(root, inode);
+ nolock = btrfs_is_free_space_inode(root, inode);
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list));
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
- if (!leaf->map_token)
- map_private_extent_buffer(leaf, (unsigned long)inode_item,
- sizeof(struct btrfs_inode_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
-
inode->i_mode = btrfs_inode_mode(leaf, inode_item);
inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
inode->i_uid = btrfs_inode_uid(leaf, inode_item);
if (!maybe_acls)
cache_no_acl(inode);
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
-
btrfs_free_path(path);
switch (inode->i_mode & S_IFMT) {
struct btrfs_inode_item *item,
struct inode *inode)
{
- if (!leaf->map_token)
- map_private_extent_buffer(leaf, (unsigned long)item,
- sizeof(struct btrfs_inode_item),
- &leaf->map_token, &leaf->kaddr,
- &leaf->map_start, &leaf->map_len,
- KM_USER1);
-
btrfs_set_inode_uid(leaf, item, inode->i_uid);
btrfs_set_inode_gid(leaf, item, inode->i_gid);
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, 0);
-
- if (leaf->map_token) {
- unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
- leaf->map_token = NULL;
- }
}
/*
* The data relocation inode should also be directly updated
* without delay
*/
- if (!is_free_space_inode(root, inode)
+ if (!btrfs_is_free_space_inode(root, inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
ret = -ENOMEM;
again:
- page = grab_cache_page(mapping, index);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
if (!page) {
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
goto out;
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
- is_free_space_inode(root, inode)))
+ btrfs_is_free_space_inode(root, inode)))
goto no_delete;
if (is_bad_inode(inode)) {
if (BTRFS_I(inode)->dummy_inode)
return 0;
- if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
+ if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
ei->index_cnt = (u64)-1;
ei->last_unlink_trans = 0;
- atomic_set(&ei->outstanding_extents, 0);
- atomic_set(&ei->reserved_extents, 0);
+ spin_lock_init(&ei->lock);
+ ei->outstanding_extents = 0;
+ ei->reserved_extents = 0;
ei->ordered_data_close = 0;
ei->orphan_meta_reserved = 0;
WARN_ON(!list_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
- WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
- WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
+ WARN_ON(BTRFS_I(inode)->outstanding_extents);
+ WARN_ON(BTRFS_I(inode)->reserved_extents);
/*
* This can happen where we create an inode, but somebody else also
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0 &&
- !is_free_space_inode(root, inode))
+ !btrfs_is_free_space_inode(root, inode))
return 1;
else
return generic_drop_inode(inode);
/* step one, lock all the pages */
for (i = 0; i < num_pages; i++) {
struct page *page;
- page = grab_cache_page(inode->i_mapping,
- start_index + i);
+ page = find_or_create_page(inode->i_mapping,
+ start_index + i, GFP_NOFS);
if (!page)
break;
GFP_NOFS);
if (i_done != num_pages) {
- atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode,
(num_pages - i_done) << PAGE_CACHE_SHIFT);
}
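
The hunks above convert BTRFS_I(inode)->outstanding_extents from an atomic_t into a plain integer guarded by a new per-inode spinlock, so that it can later be updated together with reserved_extents and related fields under one lock. Below is a rough userspace sketch of the two idioms (a C11 atomic counter versus a mutex-protected counter); the struct and function names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Old style: a lone lock-free counter. */
struct inode_old {
	atomic_int outstanding_extents;		/* was atomic_t in btrfs */
};

/* New style: several related fields updated consistently under one lock. */
struct inode_new {
	pthread_mutex_t lock;			/* models BTRFS_I(inode)->lock */
	int outstanding_extents;
	int reserved_extents;
};

static void add_extent_old(struct inode_old *i)
{
	atomic_fetch_add(&i->outstanding_extents, 1);
}

static void add_extent_new(struct inode_new *i)
{
	pthread_mutex_lock(&i->lock);
	i->outstanding_extents++;		/* both fields move together */
	i->reserved_extents++;
	pthread_mutex_unlock(&i->lock);
}

int main(void)
{
	struct inode_old o = { .outstanding_extents = 0 };
	struct inode_new n = { .lock = PTHREAD_MUTEX_INITIALIZER };

	add_extent_old(&o);
	add_extent_new(&n);
	printf("old=%d new=%d/%d\n", atomic_load(&o.outstanding_extents),
	       n.outstanding_extents, n.reserved_extents);
	return 0;
}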
#include "extent_io.h"
#include "locking.h"
-static inline void spin_nested(struct extent_buffer *eb)
-{
- spin_lock(&eb->lock);
-}
+void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
/*
- * Setting a lock to blocking will drop the spinlock and set the
- * flag that forces other procs who want the lock to wait. After
- * this you can safely schedule with the lock held.
+ * if we currently have a spinning reader or writer lock
+ * (indicated by the rw flag) this will bump the count
+ * of blocking holders and drop the spinlock.
*/
-void btrfs_set_lock_blocking(struct extent_buffer *eb)
+void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
- if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
- set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
- spin_unlock(&eb->lock);
+ if (rw == BTRFS_WRITE_LOCK) {
+ if (atomic_read(&eb->blocking_writers) == 0) {
+ WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+ atomic_dec(&eb->spinning_writers);
+ btrfs_assert_tree_locked(eb);
+ atomic_inc(&eb->blocking_writers);
+ write_unlock(&eb->lock);
+ }
+ } else if (rw == BTRFS_READ_LOCK) {
+ btrfs_assert_tree_read_locked(eb);
+ atomic_inc(&eb->blocking_readers);
+ WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+ atomic_dec(&eb->spinning_readers);
+ read_unlock(&eb->lock);
}
- /* exit with the spin lock released and the bit set */
+ return;
}
/*
- * clearing the blocking flag will take the spinlock again.
- * After this you can't safely schedule
+ * if we currently have a blocking lock, take the spinlock
+ * and drop our blocking count
*/
-void btrfs_clear_lock_blocking(struct extent_buffer *eb)
+void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
- if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
- spin_nested(eb);
- clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
- smp_mb__after_clear_bit();
+ if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
+ BUG_ON(atomic_read(&eb->blocking_writers) != 1);
+ write_lock(&eb->lock);
+ WARN_ON(atomic_read(&eb->spinning_writers));
+ atomic_inc(&eb->spinning_writers);
+ if (atomic_dec_and_test(&eb->blocking_writers))
+ wake_up(&eb->write_lock_wq);
+ } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
+ BUG_ON(atomic_read(&eb->blocking_readers) == 0);
+ read_lock(&eb->lock);
+ atomic_inc(&eb->spinning_readers);
+ if (atomic_dec_and_test(&eb->blocking_readers))
+ wake_up(&eb->read_lock_wq);
}
- /* exit with the spin lock held */
+ return;
}
/*
- * unfortunately, many of the places that currently set a lock to blocking
- * don't end up blocking for very long, and often they don't block
- * at all. For a dbench 50 run, if we don't spin on the blocking bit
- * at all, the context switch rate can jump up to 400,000/sec or more.
- *
- * So, we're still stuck with this crummy spin on the blocking bit,
- * at least until the most common causes of the short blocks
- * can be dealt with.
+ * take a spinning read lock. This will wait for any blocking
+ * writers
*/
-static int btrfs_spin_on_block(struct extent_buffer *eb)
+void btrfs_tree_read_lock(struct extent_buffer *eb)
{
- int i;
-
- for (i = 0; i < 512; i++) {
- if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- return 1;
- if (need_resched())
- break;
- cpu_relax();
+again:
+ wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+ read_lock(&eb->lock);
+ if (atomic_read(&eb->blocking_writers)) {
+ read_unlock(&eb->lock);
+ wait_event(eb->write_lock_wq,
+ atomic_read(&eb->blocking_writers) == 0);
+ goto again;
}
- return 0;
+ atomic_inc(&eb->read_locks);
+ atomic_inc(&eb->spinning_readers);
}
/*
- * This is somewhat different from trylock. It will take the
- * spinlock but if it finds the lock is set to blocking, it will
- * return without the lock held.
- *
- * returns 1 if it was able to take the lock and zero otherwise
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
*/
-int btrfs_try_spin_lock(struct extent_buffer *eb)
+int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
- int i;
+ if (atomic_read(&eb->blocking_writers))
+ return 0;
- if (btrfs_spin_on_block(eb)) {
- spin_nested(eb);
- if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- return 1;
- spin_unlock(&eb->lock);
+ read_lock(&eb->lock);
+ if (atomic_read(&eb->blocking_writers)) {
+ read_unlock(&eb->lock);
+ return 0;
}
- /* spin for a bit on the BLOCKING flag */
- for (i = 0; i < 2; i++) {
- cpu_relax();
- if (!btrfs_spin_on_block(eb))
- break;
-
- spin_nested(eb);
- if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- return 1;
- spin_unlock(&eb->lock);
- }
- return 0;
+ atomic_inc(&eb->read_locks);
+ atomic_inc(&eb->spinning_readers);
+ return 1;
}
/*
- * the autoremove wake function will return 0 if it tried to wake up
- * a process that was already awake, which means that process won't
- * count as an exclusive wakeup. The waitq code will continue waking
- * procs until it finds one that was actually sleeping.
- *
- * For btrfs, this isn't quite what we want. We want a single proc
- * to be notified that the lock is ready for taking. If that proc
- * already happen to be awake, great, it will loop around and try for
- * the lock.
- *
- * So, btrfs_wake_function always returns 1, even when the proc that we
- * tried to wake up was already awake.
+ * returns 1 if we get the write lock and 0 if we don't
+ * this won't wait for blocking writers or readers
*/
-static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
+int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
- autoremove_wake_function(wait, mode, sync, key);
+ if (atomic_read(&eb->blocking_writers) ||
+ atomic_read(&eb->blocking_readers))
+ return 0;
+ write_lock(&eb->lock);
+ if (atomic_read(&eb->blocking_writers) ||
+ atomic_read(&eb->blocking_readers)) {
+ write_unlock(&eb->lock);
+ return 0;
+ }
+ atomic_inc(&eb->write_locks);
+ atomic_inc(&eb->spinning_writers);
return 1;
}
/*
- * returns with the extent buffer spinlocked.
- *
- * This will spin and/or wait as required to take the lock, and then
- * return with the spinlock held.
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
+ * drop a spinning read lock
+ */
+void btrfs_tree_read_unlock(struct extent_buffer *eb)
+{
+ btrfs_assert_tree_read_locked(eb);
+ WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+ atomic_dec(&eb->spinning_readers);
+ atomic_dec(&eb->read_locks);
+ read_unlock(&eb->lock);
+}
+
+/*
+ * drop a blocking read lock
+ */
+void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
+{
+ btrfs_assert_tree_read_locked(eb);
+ WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+ if (atomic_dec_and_test(&eb->blocking_readers))
+ wake_up(&eb->read_lock_wq);
+ atomic_dec(&eb->read_locks);
+}
+
+/*
+ * take a spinning write lock. This will wait for both
+ * blocking readers and writers
*/
int btrfs_tree_lock(struct extent_buffer *eb)
{
- DEFINE_WAIT(wait);
- wait.func = btrfs_wake_function;
-
- if (!btrfs_spin_on_block(eb))
- goto sleep;
-
- while(1) {
- spin_nested(eb);
-
- /* nobody is blocking, exit with the spinlock held */
- if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- return 0;
-
- /*
- * we have the spinlock, but the real owner is blocking.
- * wait for them
- */
- spin_unlock(&eb->lock);
-
- /*
- * spin for a bit, and if the blocking flag goes away,
- * loop around
- */
- cpu_relax();
- if (btrfs_spin_on_block(eb))
- continue;
-sleep:
- prepare_to_wait_exclusive(&eb->lock_wq, &wait,
- TASK_UNINTERRUPTIBLE);
-
- if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- schedule();
-
- finish_wait(&eb->lock_wq, &wait);
+again:
+ wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
+ wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+ write_lock(&eb->lock);
+ if (atomic_read(&eb->blocking_readers)) {
+ write_unlock(&eb->lock);
+ wait_event(eb->read_lock_wq,
+ atomic_read(&eb->blocking_readers) == 0);
+ goto again;
}
+ if (atomic_read(&eb->blocking_writers)) {
+ write_unlock(&eb->lock);
+ wait_event(eb->write_lock_wq,
+ atomic_read(&eb->blocking_writers) == 0);
+ goto again;
+ }
+ WARN_ON(atomic_read(&eb->spinning_writers));
+ atomic_inc(&eb->spinning_writers);
+ atomic_inc(&eb->write_locks);
return 0;
}
+/*
+ * drop a spinning or a blocking write lock.
+ */
int btrfs_tree_unlock(struct extent_buffer *eb)
{
- /*
- * if we were a blocking owner, we don't have the spinlock held
- * just clear the bit and look for waiters
- */
- if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- smp_mb__after_clear_bit();
- else
- spin_unlock(&eb->lock);
-
- if (waitqueue_active(&eb->lock_wq))
- wake_up(&eb->lock_wq);
+ int blockers = atomic_read(&eb->blocking_writers);
+
+ BUG_ON(blockers > 1);
+
+ btrfs_assert_tree_locked(eb);
+ atomic_dec(&eb->write_locks);
+
+ if (blockers) {
+ WARN_ON(atomic_read(&eb->spinning_writers));
+ atomic_dec(&eb->blocking_writers);
+ smp_wmb();
+ wake_up(&eb->write_lock_wq);
+ } else {
+ WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+ atomic_dec(&eb->spinning_writers);
+ write_unlock(&eb->lock);
+ }
return 0;
}
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
- if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
- assert_spin_locked(&eb->lock);
+ BUG_ON(!atomic_read(&eb->write_locks));
+}
+
+void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+{
+ BUG_ON(!atomic_read(&eb->read_locks));
}
#ifndef __BTRFS_LOCKING_
#define __BTRFS_LOCKING_
+#define BTRFS_WRITE_LOCK 1
+#define BTRFS_READ_LOCK 2
+#define BTRFS_WRITE_LOCK_BLOCKING 3
+#define BTRFS_READ_LOCK_BLOCKING 4
+
int btrfs_tree_lock(struct extent_buffer *eb);
int btrfs_tree_unlock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
-void btrfs_set_lock_blocking(struct extent_buffer *eb);
-void btrfs_clear_lock_blocking(struct extent_buffer *eb);
+void btrfs_tree_read_lock(struct extent_buffer *eb);
+void btrfs_tree_read_unlock(struct extent_buffer *eb);
+void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
+void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
void btrfs_assert_tree_locked(struct extent_buffer *eb);
+int btrfs_try_tree_read_lock(struct extent_buffer *eb);
+int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+
+static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
+{
+ if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
+ btrfs_tree_unlock(eb);
+ else if (rw == BTRFS_READ_LOCK_BLOCKING)
+ btrfs_tree_read_unlock_blocking(eb);
+ else if (rw == BTRFS_READ_LOCK)
+ btrfs_tree_read_unlock(eb);
+ else
+ BUG();
+}
+
+static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
+{
+ btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
+}
+
+static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb)
+{
+ btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
+}
#endif
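
The rewritten locking code above replaces the old bit-flag scheme with a real rwlock plus blocking_readers/blocking_writers counters and two wait queues: a spinning holder can "go blocking" by bumping a counter and dropping the low-level lock, and other lockers wait for those counters to drain before retrying. The following is a minimal userspace model of that idea using pthreads and C11 atomics; it only covers the reader-goes-blocking / writer-waits path and is an illustration of the scheme, not the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct eb_lock {
	pthread_rwlock_t lock;		/* models eb->lock */
	atomic_int blocking_readers;	/* models eb->blocking_readers */
	pthread_mutex_t wq_mutex;	/* backs the wait queue below */
	pthread_cond_t read_wq;		/* models eb->read_lock_wq */
};

static struct eb_lock eb = {
	.lock = PTHREAD_RWLOCK_INITIALIZER,
	.wq_mutex = PTHREAD_MUTEX_INITIALIZER,
	.read_wq = PTHREAD_COND_INITIALIZER,
};

/* spinning read lock / unlock */
static void tree_read_lock(void)   { pthread_rwlock_rdlock(&eb.lock); }
static void tree_read_unlock(void) { pthread_rwlock_unlock(&eb.lock); }

/* a spinning reader goes blocking: count it and drop the low-level lock,
 * after which it may sleep */
static void set_read_lock_blocking(void)
{
	atomic_fetch_add(&eb.blocking_readers, 1);
	pthread_rwlock_unlock(&eb.lock);
}

/* ...and back: retake the lock, drop the count, wake any waiting writer */
static void clear_read_lock_blocking(void)
{
	pthread_rwlock_rdlock(&eb.lock);
	if (atomic_fetch_sub(&eb.blocking_readers, 1) == 1) {
		pthread_mutex_lock(&eb.wq_mutex);
		pthread_cond_broadcast(&eb.read_wq);
		pthread_mutex_unlock(&eb.wq_mutex);
	}
}

/* write lock: wait for blocking readers to drain, take the rwlock, then
 * re-check in case a reader went blocking while we were acquiring it */
static void tree_write_lock(void)
{
again:
	pthread_mutex_lock(&eb.wq_mutex);
	while (atomic_load(&eb.blocking_readers))
		pthread_cond_wait(&eb.read_wq, &eb.wq_mutex);
	pthread_mutex_unlock(&eb.wq_mutex);

	pthread_rwlock_wrlock(&eb.lock);
	if (atomic_load(&eb.blocking_readers)) {
		pthread_rwlock_unlock(&eb.lock);
		goto again;
	}
}

static void tree_write_unlock(void) { pthread_rwlock_unlock(&eb.lock); }

int main(void)
{
	tree_read_lock();
	set_read_lock_blocking();	/* safe to sleep here */
	clear_read_lock_blocking();
	tree_read_unlock();

	tree_write_lock();
	tree_write_unlock();
	return 0;
}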
page_cache_sync_readahead(inode->i_mapping,
ra, NULL, index,
last_index + 1 - index);
- page = grab_cache_page(inode->i_mapping, index);
+ page = find_or_create_page(inode->i_mapping, index,
+ GFP_NOFS);
if (!page) {
btrfs_delalloc_release_metadata(inode,
PAGE_CACHE_SIZE);
unsigned long part_offset = (unsigned long)s; \
unsigned long offset = part_offset + offsetof(type, member); \
type *p; \
- /* ugly, but we want the fast path here */ \
- if (eb->map_token && offset >= eb->map_start && \
- offset + sizeof(((type *)0)->member) <= eb->map_start + \
- eb->map_len) { \
- p = (type *)(eb->kaddr + part_offset - eb->map_start); \
- return le##bits##_to_cpu(p->member); \
- } \
- { \
- int err; \
- char *map_token; \
- char *kaddr; \
- int unmap_on_exit = (eb->map_token == NULL); \
- unsigned long map_start; \
- unsigned long map_len; \
- u##bits res; \
- err = map_extent_buffer(eb, offset, \
- sizeof(((type *)0)->member), \
- &map_token, &kaddr, \
- &map_start, &map_len, KM_USER1); \
- if (err) { \
- __le##bits leres; \
- read_eb_member(eb, s, type, member, &leres); \
- return le##bits##_to_cpu(leres); \
- } \
- p = (type *)(kaddr + part_offset - map_start); \
- res = le##bits##_to_cpu(p->member); \
- if (unmap_on_exit) \
- unmap_extent_buffer(eb, map_token, KM_USER1); \
- return res; \
- } \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ u##bits res; \
+ err = map_private_extent_buffer(eb, offset, \
+ sizeof(((type *)0)->member), \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits leres; \
+ read_eb_member(eb, s, type, member, &leres); \
+ return le##bits##_to_cpu(leres); \
+ } \
+ p = (type *)(kaddr + part_offset - map_start); \
+ res = le##bits##_to_cpu(p->member); \
+ return res; \
} \
void btrfs_set_##name(struct extent_buffer *eb, \
type *s, u##bits val) \
unsigned long part_offset = (unsigned long)s; \
unsigned long offset = part_offset + offsetof(type, member); \
type *p; \
- /* ugly, but we want the fast path here */ \
- if (eb->map_token && offset >= eb->map_start && \
- offset + sizeof(((type *)0)->member) <= eb->map_start + \
- eb->map_len) { \
- p = (type *)(eb->kaddr + part_offset - eb->map_start); \
- p->member = cpu_to_le##bits(val); \
- return; \
- } \
- { \
- int err; \
- char *map_token; \
- char *kaddr; \
- int unmap_on_exit = (eb->map_token == NULL); \
- unsigned long map_start; \
- unsigned long map_len; \
- err = map_extent_buffer(eb, offset, \
- sizeof(((type *)0)->member), \
- &map_token, &kaddr, \
- &map_start, &map_len, KM_USER1); \
- if (err) { \
- __le##bits val2; \
- val2 = cpu_to_le##bits(val); \
- write_eb_member(eb, s, type, member, &val2); \
- return; \
- } \
- p = (type *)(kaddr + part_offset - map_start); \
- p->member = cpu_to_le##bits(val); \
- if (unmap_on_exit) \
- unmap_extent_buffer(eb, map_token, KM_USER1); \
- } \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ err = map_private_extent_buffer(eb, offset, \
+ sizeof(((type *)0)->member), \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits val2; \
+ val2 = cpu_to_le##bits(val); \
+ write_eb_member(eb, s, type, member, &val2); \
+ return; \
+ } \
+ p = (type *)(kaddr + part_offset - map_start); \
+ p->member = cpu_to_le##bits(val); \
}
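
With the map_token fast path gone, the generated accessors above always go through map_private_extent_buffer() and convert the little-endian on-disk value in place. A self-contained userspace sketch of the same macro-generated accessor pattern (hypothetical names, a plain memcpy standing in for the page mapping) looks like this:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Generate get/set helpers for an unaligned little-endian struct member,
 * in the spirit of the generated btrfs accessors (names are made up). */
#define DEFINE_LE_ACCESSORS(name, type, member, bits)			\
static inline uint##bits##_t get_##name(const void *base)		\
{									\
	uint##bits##_t v;						\
	memcpy(&v, (const char *)base + offsetof(type, member),	\
	       sizeof(v));						\
	return le##bits##toh(v);					\
}									\
static inline void set_##name(void *base, uint##bits##_t val)		\
{									\
	uint##bits##_t v = htole##bits(val);				\
	memcpy((char *)base + offsetof(type, member), &v, sizeof(v));	\
}

struct disk_item {
	uint64_t objectid;
	uint32_t size;
} __attribute__((packed));

DEFINE_LE_ACCESSORS(item_objectid, struct disk_item, objectid, 64)
DEFINE_LE_ACCESSORS(item_size, struct disk_item, size, 32)

int main(void)
{
	unsigned char buf[sizeof(struct disk_item)] = { 0 };

	set_item_objectid(buf, 256);
	set_item_size(buf, 4096);
	printf("objectid=%llu size=%u\n",
	       (unsigned long long)get_item_objectid(buf),
	       get_item_size(buf));
	return 0;
}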
#include "ctree.h"
struct btrfs_disk_key *disk_key, int nr)
{
unsigned long ptr = btrfs_node_key_ptr_offset(nr);
- if (eb->map_token && ptr >= eb->map_start &&
- ptr + sizeof(*disk_key) <= eb->map_start + eb->map_len) {
- memcpy(disk_key, eb->kaddr + ptr - eb->map_start,
- sizeof(*disk_key));
- return;
- } else if (eb->map_token) {
- unmap_extent_buffer(eb, eb->map_token, KM_USER1);
- eb->map_token = NULL;
- }
read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
struct btrfs_key_ptr, key, disk_key);
}
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
- int retries = 0;
+ u64 num_bytes = 0;
int ret;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
h->block_rsv = NULL;
goto got_it;
}
+
+ /*
+ * Do the reservation before we join the transaction so we can do all
+ * the appropriate flushing if need be.
+ */
+ if (num_items > 0 && root != root->fs_info->chunk_root) {
+ num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
+ ret = btrfs_block_rsv_add(NULL, root,
+ &root->fs_info->trans_block_rsv,
+ num_bytes);
+ if (ret)
+ return ERR_PTR(ret);
+ }
again:
h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
if (!h)
goto again;
}
- if (num_items > 0) {
- ret = btrfs_trans_reserve_metadata(h, root, num_items);
- if (ret == -EAGAIN && !retries) {
- retries++;
- btrfs_commit_transaction(h, root);
- goto again;
- } else if (ret == -EAGAIN) {
- /*
- * We have already retried and got EAGAIN, so really we
- * don't have space, so set ret to -ENOSPC.
- */
- ret = -ENOSPC;
- }
-
- if (ret < 0) {
- btrfs_end_transaction(h, root);
- return ERR_PTR(ret);
- }
+ if (num_bytes) {
+ h->block_rsv = &root->fs_info->trans_block_rsv;
+ h->bytes_reserved = num_bytes;
}
got_it:
}
if (lock && cur_trans->blocked && !cur_trans->in_commit) {
- if (throttle)
+ if (throttle) {
+ /*
+ * We may race with somebody else here so end up having
+ * to call end_transaction on ourselves again, so inc
+ * our use_count.
+ */
+ trans->use_count++;
return btrfs_commit_transaction(trans, root);
- else
+ } else {
wake_up_process(info->transaction_kthread);
+ }
}
WARN_ON(cur_trans != info->running_transaction);
btrfs_read_buffer(next, ptr_gen);
btrfs_tree_lock(next);
- clean_tree_block(trans, root, next);
btrfs_set_lock_blocking(next);
+ clean_tree_block(trans, root, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
next = path->nodes[*level];
btrfs_tree_lock(next);
- clean_tree_block(trans, root, next);
btrfs_set_lock_blocking(next);
+ clean_tree_block(trans, root, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
next = path->nodes[orig_level];
btrfs_tree_lock(next);
- clean_tree_block(trans, log, next);
btrfs_set_lock_blocking(next);
+ clean_tree_block(trans, log, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
if (!sb)
return -ENOMEM;
btrfs_set_buffer_uptodate(sb);
- btrfs_set_buffer_lockdep_class(sb, 0);
+ btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
array_size = btrfs_super_sys_array_size(super_copy);
if (!path)
return -ENOMEM;
- /* first lets see if we already have this xattr */
- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
- strlen(name), -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
-
- /* ok we already have this xattr, lets remove it */
- if (di) {
- /* if we want create only exit */
- if (flags & XATTR_CREATE) {
- ret = -EEXIST;
+ if (flags & XATTR_REPLACE) {
+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
+ name_len, -1);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ } else if (!di) {
+ ret = -ENODATA;
goto out;
}
-
ret = btrfs_delete_one_dir_name(trans, root, path, di);
- BUG_ON(ret);
+ if (ret)
+ goto out;
btrfs_release_path(path);
+ }
- /* if we don't have a value then we are removing the xattr */
- if (!value)
+again:
+ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
+ name, name_len, value, size);
+ if (ret == -EEXIST) {
+ if (flags & XATTR_CREATE)
goto out;
- } else {
+ /*
+ * We can't use the path we already have since we won't have the
+ * proper locking for a delete, so release the path and
+ * re-lookup to delete the thing.
+ */
btrfs_release_path(path);
+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
+ name, name_len, -1);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ } else if (!di) {
+ /* Shouldn't happen but just in case... */
+ btrfs_release_path(path);
+ goto again;
+ }
- if (flags & XATTR_REPLACE) {
- /* we couldn't find the attr to replace */
- ret = -ENODATA;
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ if (ret)
goto out;
+
+ /*
+ * We have a value to set, so go back and try to insert it now.
+ */
+ if (value) {
+ btrfs_release_path(path);
+ goto again;
}
}
-
- /* ok we have to create a completely new xattr */
- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
- name, name_len, value, size);
- BUG_ON(ret);
out:
btrfs_free_path(path);
return ret;
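
The reworked setxattr path above inverts the old flow: it tries the insert first and only falls back to delete-and-retry when the item already exists, honouring XATTR_CREATE and XATTR_REPLACE along the way. A small in-memory model of that control flow follows (illustrative names; the NULL-value "remove the xattr" case is omitted for brevity):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ATTRS     16
#define XATTR_CREATE  0x1
#define XATTR_REPLACE 0x2

struct attr { char *name; char *value; };
static struct attr attrs[MAX_ATTRS];

static struct attr *find_attr(const char *name)
{
	for (int i = 0; i < MAX_ATTRS; i++)
		if (attrs[i].name && !strcmp(attrs[i].name, name))
			return &attrs[i];
	return NULL;
}

static int insert_attr(const char *name, const char *value)
{
	if (find_attr(name))
		return -EEXIST;
	for (int i = 0; i < MAX_ATTRS; i++) {
		if (!attrs[i].name) {
			attrs[i].name = strdup(name);
			attrs[i].value = strdup(value);
			return 0;
		}
	}
	return -ENOSPC;
}

static void delete_attr(struct attr *a)
{
	free(a->name);
	free(a->value);
	a->name = a->value = NULL;
}

/* Mirrors the reworked flow: XATTR_REPLACE is handled up front, otherwise we
 * try the insert and fall back to delete-and-retry if the name exists. */
static int set_attr(const char *name, const char *value, int flags)
{
	struct attr *a;
	int ret;

	if (flags & XATTR_REPLACE) {
		a = find_attr(name);
		if (!a)
			return -ENODATA;
		delete_attr(a);
	}
again:
	ret = insert_attr(name, value);
	if (ret == -EEXIST) {
		if (flags & XATTR_CREATE)
			return ret;
		a = find_attr(name);
		if (!a)			/* shouldn't happen, retry the insert */
			goto again;
		delete_attr(a);
		goto again;
	}
	return ret;
}

int main(void)
{
	printf("%d\n", set_attr("user.a", "1", 0));		/* 0 */
	printf("%d\n", set_attr("user.a", "2", XATTR_CREATE));	/* -EEXIST */
	printf("%d\n", set_attr("user.a", "3", 0));		/* 0, replaced */
	printf("%d\n", set_attr("user.b", "x", XATTR_REPLACE));	/* -ENODATA */
	return 0;
}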
struct dentry *temp;
char *path;
int len, pos;
+ unsigned seq;
if (dentry == NULL)
return ERR_PTR(-EINVAL);
retry:
len = 0;
+ seq = read_seqbegin(&rename_lock);
+ rcu_read_lock();
for (temp = dentry; !IS_ROOT(temp);) {
struct inode *inode = temp->d_inode;
if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
len += 1 + temp->d_name.len;
temp = temp->d_parent;
if (temp == NULL) {
+ rcu_read_unlock();
pr_err("build_path corrupt dentry %p\n", dentry);
return ERR_PTR(-EINVAL);
}
}
+ rcu_read_unlock();
if (len)
len--; /* no leading '/' */
return ERR_PTR(-ENOMEM);
pos = len;
path[pos] = 0; /* trailing null */
+ rcu_read_lock();
for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
- struct inode *inode = temp->d_inode;
+ struct inode *inode;
+ spin_lock(&temp->d_lock);
+ inode = temp->d_inode;
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
dout("build_path path+%d: %p SNAPDIR\n",
pos, temp);
break;
} else {
pos -= temp->d_name.len;
- if (pos < 0)
+ if (pos < 0) {
+ spin_unlock(&temp->d_lock);
break;
+ }
strncpy(path + pos, temp->d_name.name,
temp->d_name.len);
}
+ spin_unlock(&temp->d_lock);
if (pos)
path[--pos] = '/';
temp = temp->d_parent;
if (temp == NULL) {
+ rcu_read_unlock();
pr_err("build_path corrupt dentry\n");
kfree(path);
return ERR_PTR(-EINVAL);
}
}
- if (pos != 0) {
+ rcu_read_unlock();
+ if (pos != 0 || read_seqretry(&rename_lock, seq)) {
pr_err("build_path did not end path lookup where "
"expected, namelen is %d, pos is %d\n", len, pos);
/* presumably this is only possible if racing with a
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/namei.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
static struct dentry *
cifs_get_root(struct smb_vol *vol, struct super_block *sb)
{
- int xid, rc;
- struct inode *inode;
- struct qstr name;
- struct dentry *dparent = NULL, *dchild = NULL, *alias;
+ struct dentry *dentry;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- unsigned int i, full_len, len;
- char *full_path = NULL, *pstart;
+ char *full_path = NULL;
+ char *s, *p;
char sep;
+ int xid;
full_path = cifs_build_path_to_root(vol, cifs_sb,
cifs_sb_master_tcon(cifs_sb));
xid = GetXid();
sep = CIFS_DIR_SEP(cifs_sb);
- dparent = dget(sb->s_root);
- full_len = strlen(full_path);
- full_path[full_len] = sep;
- pstart = full_path + 1;
-
- for (i = 1, len = 0; i <= full_len; i++) {
- if (full_path[i] != sep || !len) {
- len++;
- continue;
- }
-
- full_path[i] = 0;
- cFYI(1, "get dentry for %s", pstart);
-
- name.name = pstart;
- name.len = len;
- name.hash = full_name_hash(pstart, len);
- dchild = d_lookup(dparent, &name);
- if (dchild == NULL) {
- cFYI(1, "not exists");
- dchild = d_alloc(dparent, &name);
- if (dchild == NULL) {
- dput(dparent);
- dparent = ERR_PTR(-ENOMEM);
- goto out;
- }
- }
-
- cFYI(1, "get inode");
- if (dchild->d_inode == NULL) {
- cFYI(1, "not exists");
- inode = NULL;
- if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, full_path,
- sb, xid);
- else
- rc = cifs_get_inode_info(&inode, full_path,
- NULL, sb, xid, NULL);
- if (rc) {
- dput(dchild);
- dput(dparent);
- dparent = ERR_PTR(rc);
- goto out;
- }
- alias = d_materialise_unique(dchild, inode);
- if (alias != NULL) {
- dput(dchild);
- if (IS_ERR(alias)) {
- dput(dparent);
- dparent = ERR_PTR(-EINVAL); /* XXX */
- goto out;
- }
- dchild = alias;
- }
- }
- cFYI(1, "parent %p, child %p", dparent, dchild);
-
- dput(dparent);
- dparent = dchild;
- len = 0;
- pstart = full_path + i + 1;
- full_path[i] = sep;
- }
-out:
+ dentry = dget(sb->s_root);
+ p = s = full_path;
+
+ do {
+ struct inode *dir = dentry->d_inode;
+ struct dentry *child;
+
+ /* skip separators */
+ while (*s == sep)
+ s++;
+ if (!*s)
+ break;
+ p = s++;
+ /* next separator */
+ while (*s && *s != sep)
+ s++;
+
+ mutex_lock(&dir->i_mutex);
+ child = lookup_one_len(p, dentry, s - p);
+ mutex_unlock(&dir->i_mutex);
+ dput(dentry);
+ dentry = child;
+ } while (!IS_ERR(dentry));
_FreeXid(xid);
kfree(full_path);
- return dparent;
+ return dentry;
}
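
The new cifs_get_root() above walks the prefix path one component at a time and hands each component to lookup_one_len() under the parent directory's i_mutex, instead of hand-building dentries. The separator-skipping loop is easy to see in isolation; here is a userspace sketch of just that loop, with the dentry lookup replaced by a printf:

#include <stdio.h>

static void walk_components(const char *full_path, char sep)
{
	const char *s = full_path, *p;

	for (;;) {
		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* advance to the next separator (or end of string) */
		while (*s && *s != sep)
			s++;
		/* the kernel code calls lookup_one_len(p, dentry, s - p) here */
		printf("component: %.*s\n", (int)(s - p), p);
	}
}

int main(void)
{
	walk_components("\\srv\\share\\dir\\file", '\\');
	return 0;
}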
static int cifs_set_super(struct super_block *sb, void *data)
extern const struct export_operations cifs_export_ops;
#endif /* CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "1.73"
+#define CIFS_VERSION "1.74"
#endif /* _CIFSFS_H */
goto out;
}
- snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid);
+ snprintf(username, sizeof(username), "krb50x%x", fsuid);
vol_info->username = username;
vol_info->local_nls = cifs_sb->local_nls;
vol_info->linux_uid = fsuid;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ unsigned seq;
if (direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
dfsplen = 0;
cifs_bp_rename_retry:
namelen = dfsplen;
+ seq = read_seqbegin(&rename_lock);
+ rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
+ rcu_read_unlock();
return NULL;
}
}
+ rcu_read_unlock();
full_path = kmalloc(namelen+1, GFP_KERNEL);
if (full_path == NULL)
return full_path;
full_path[namelen] = 0; /* trailing null */
+ rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
+ spin_lock(&temp->d_lock);
namelen -= 1 + temp->d_name.len;
if (namelen < 0) {
+ spin_unlock(&temp->d_lock);
break;
} else {
full_path[namelen] = dirsep;
temp->d_name.len);
cFYI(0, "name: %s", full_path + namelen);
}
+ spin_unlock(&temp->d_lock);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
+ rcu_read_unlock();
kfree(full_path);
return NULL;
}
}
- if (namelen != dfsplen) {
+ rcu_read_unlock();
+ if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
cERROR(1, "did not end path lookup where expected namelen is %d",
namelen);
/* presumably this is only possible if racing with a rename
io_parms.pid = pid;
io_parms.tcon = pTcon;
io_parms.offset = *poffset;
- io_parms.length = len;
+ io_parms.length = cur_len;
rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
&read_data, &buf_type);
pSMBr = (struct smb_com_read_rsp *)read_data;
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab)
- flags |= NTLMSSP_NEGOTIATE_KEY_XCH |
- NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
}
sec_blob->NegotiateFlags = cpu_to_le32(flags);
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
if (ses->server->sec_mode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
- flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+ if (!ses->server->session_estab)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ }
tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x) ((x)->i_ino)
-static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
+static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
if (!cino->offset)
return offset + 1;
}
static struct inode *get_cramfs_inode(struct super_block *sb,
- struct cramfs_inode *cramfs_inode, unsigned int offset)
+ const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
struct inode *inode;
static struct timespec zerotime;
/* Set it all up.. */
sb->s_op = &cramfs_ops;
root = get_cramfs_inode(sb, &super.root, 0);
- if (!root)
+ if (IS_ERR(root))
goto out;
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
unsigned int offset = 0;
+ struct inode *inode = NULL;
int sorted;
mutex_lock(&read_mutex);
for (;;) {
if (!namelen) {
- mutex_unlock(&read_mutex);
- return ERR_PTR(-EIO);
+ inode = ERR_PTR(-EIO);
+ goto out;
}
if (name[namelen-1])
break;
if (retval > 0)
continue;
if (!retval) {
- struct cramfs_inode entry = *de;
- mutex_unlock(&read_mutex);
- d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
- return NULL;
+ inode = get_cramfs_inode(dir->i_sb, de, dir_off);
+ break;
}
/* else (retval < 0) */
if (sorted)
break;
}
+out:
mutex_unlock(&read_mutex);
- d_add(dentry, NULL);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ d_add(dentry, inode);
return NULL;
}
tname = dentry->d_name.name;
i = dentry->d_inode;
prefetch(tname);
- if (i)
- prefetch(i);
/*
* This seqcount check is required to ensure name and
* len are loaded atomically, so as not to walk off the
* The hash value has to match the hash queue that the dentry is on..
*/
/*
- * d_move - move a dentry
+ * __d_move - move a dentry
* @dentry: entry to move
* @target: new dentry
*
* Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way. Caller must hold
+ * rename_lock.
*/
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
{
if (!dentry->d_inode)
printk(KERN_WARNING "VFS: moving negative dcache entry\n");
BUG_ON(d_ancestor(dentry, target));
BUG_ON(d_ancestor(target, dentry));
- write_seqlock(&rename_lock);
-
dentry_lock_for_move(dentry, target);
write_seqcount_begin(&dentry->d_seq);
spin_unlock(&target->d_lock);
fsnotify_d_move(dentry);
spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+ write_seqlock(&rename_lock);
+ __d_move(dentry, target);
write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
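
d_move() now takes rename_lock around __d_move(), and the ceph and cifs path builders earlier in this series retry their lockless walks with read_seqbegin()/read_seqretry() against that same seqlock. Below is a minimal userspace seqlock illustrating the reader-retry pattern; it uses sequentially consistent atomics for simplicity, and a real seqlock additionally serializes writers with a spinlock and uses weaker memory ordering.

#include <stdatomic.h>
#include <stdio.h>

struct seqlock {
	atomic_uint sequence;	/* odd while a writer is mid-update */
};

static unsigned read_seqbegin(struct seqlock *s)
{
	unsigned seq;

	while ((seq = atomic_load(&s->sequence)) & 1)
		;		/* writer in progress, spin */
	return seq;
}

static int read_seqretry(struct seqlock *s, unsigned seq)
{
	return atomic_load(&s->sequence) != seq;
}

static void write_seqlock(struct seqlock *s)   { atomic_fetch_add(&s->sequence, 1); }
static void write_sequnlock(struct seqlock *s) { atomic_fetch_add(&s->sequence, 1); }

static struct seqlock demo_lock;
static int shared_value;

int main(void)
{
	unsigned seq;
	int snapshot;

	write_seqlock(&demo_lock);
	shared_value = 42;			/* writer side */
	write_sequnlock(&demo_lock);

	do {
		seq = read_seqbegin(&demo_lock);
		snapshot = shared_value;	/* lockless read of the data */
	} while (read_seqretry(&demo_lock, seq));

	printf("snapshot=%d\n", snapshot);
	return 0;
}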
* This helper attempts to cope with remotely renamed directories
*
* It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
*
* Note: If ever the locking in lock_rename() changes, then please
* remember to update this too...
if (alias->d_parent == dentry->d_parent)
goto out_unalias;
- /* Check for loops */
- ret = ERR_PTR(-ELOOP);
- if (d_ancestor(alias, dentry))
- goto out_err;
-
/* See lock_rename() */
ret = ERR_PTR(-EBUSY);
if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
goto out_err;
m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
- d_move(alias, dentry);
+ __d_move(alias, dentry);
ret = alias;
out_err:
spin_unlock(&inode->i_lock);
alias = __d_find_alias(inode, 0);
if (alias) {
actual = alias;
- /* Is this an anonymous mountpoint that we could splice
- * into our tree? */
- if (IS_ROOT(alias)) {
+ write_seqlock(&rename_lock);
+
+ if (d_ancestor(alias, dentry)) {
+ /* Check for loops */
+ actual = ERR_PTR(-ELOOP);
+ } else if (IS_ROOT(alias)) {
+ /* Is this an anonymous mountpoint that we
+ * could splice into our tree? */
__d_materialise_dentry(dentry, alias);
+ write_sequnlock(&rename_lock);
__d_drop(alias);
goto found;
+ } else {
+ /* Nope, but we must(!) avoid directory
+ * aliasing */
+ actual = __d_unalias(inode, dentry, alias);
}
- /* Nope, but we must(!) avoid directory aliasing */
- actual = __d_unalias(inode, dentry, alias);
+ write_sequnlock(&rename_lock);
if (IS_ERR(actual))
dput(alias);
goto out_nolock;
unsigned long ino = exofs_parent_ino(child);
if (!ino)
- return NULL;
+ return ERR_PTR(-ESTALE);
return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
}
pagevec_init(&pvec, 0);
next = 0;
- while (next <= (loff_t)-1 &&
- pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
- ) {
+ do {
+ if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+ break;
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
- pgoff_t page_index = page->index;
-
- ASSERTCMP(page_index, >=, next);
- next = page_index + 1;
-
+ next = page->index;
if (PageFsCache(page)) {
__fscache_wait_on_page_write(cookie, page);
__fscache_uncache_page(cookie, page);
}
pagevec_release(&pvec);
cond_resched();
- }
+ } while (++next);
_leave("");
}
return 0;
gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_ail_lock);
head = bh = page_buffers(page);
do {
if (atomic_read(&bh->b_count))
goto not_possible;
bh = bh->b_this_page;
} while(bh != head);
+ spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
head = bh = page_buffers(page);
WARN_ON(buffer_dirty(bh));
WARN_ON(buffer_pinned(bh));
cannot_release:
+ spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
return 0;
}
bd_ail_gl_list);
bh = bd->bd_bh;
gfs2_remove_from_ail(bd);
- spin_unlock(&sdp->sd_ail_lock);
-
bd->bd_bh = NULL;
bh->b_private = NULL;
+ spin_unlock(&sdp->sd_ail_lock);
+
bd->bd_blkno = bh->b_blocknr;
gfs2_log_lock(sdp);
gfs2_assert_withdraw(sdp, !buffer_busy(bh));
}
}
- if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+ if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+ gfs2_log_flush(gl->gl_sbd, NULL);
gl->gl_sbd->sd_rindex_uptodate = 0;
+ }
if (ip && S_ISREG(ip->i_inode.i_mode))
truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
+#include <linux/completion.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
struct gfs2_glock *sd_trans_gl;
wait_queue_head_t sd_glock_wait;
atomic_t sd_glock_disposal;
+ struct completion sd_locking_init;
/* Inode Stuff */
if (gfs2_ail1_empty(sdp))
break;
}
+ gfs2_log_flush(sdp, NULL);
}
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
init_waitqueue_head(&sdp->sd_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
+ init_completion(&sdp->sd_locking_init);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
fsname++;
if (lm->lm_mount == NULL) {
fs_info(sdp, "Now mounting FS...\n");
+ complete(&sdp->sd_locking_init);
return 0;
}
ret = lm->lm_mount(sdp, fsname);
if (ret == 0)
fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+ complete(&sdp->sd_locking_init);
return ret;
}
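
The sd_locking_init completion added above lets the sysfs store handlers sleep until gfs2_lm_mount() has finished setting up the lock protocol instead of racing with it. A pthread-based userspace model of the complete()/wait_for_completion() pairing (illustration only, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion locking_init;

static void *mounter(void *arg)
{
	/* ... lock protocol setup would happen here ... */
	complete(&locking_init);
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&locking_init);
	pthread_create(&t, NULL, mounter, NULL);
	wait_for_completion(&locking_init);	/* like the sysfs store hooks */
	puts("locking initialised, safe to proceed");
	pthread_join(t, NULL);
	return 0;
}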
struct timespec atime;
struct gfs2_dinode *di;
int ret = -EAGAIN;
+ int unlock_required = 0;
/* Skip timestamp update, if this is from a memalloc */
if (current->flags & PF_MEMALLOC)
goto do_flush;
- ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- if (ret)
- goto do_flush;
+ if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ if (ret)
+ goto do_flush;
+ unlock_required = 1;
+ }
ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (ret)
goto do_unlock;
}
gfs2_trans_end(sdp);
do_unlock:
- gfs2_glock_dq_uninit(&gh);
+ if (unlock_required)
+ gfs2_glock_dq_uninit(&gh);
do_flush:
if (wbc->sync_mode == WB_SYNC_ALL)
gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
return error;
}
-/*
+/**
+ * gfs2_evict_inode - Remove an inode from cache
+ * @inode: The inode to evict
+ *
+ * There are three cases to consider:
+ * 1. i_nlink == 0, we are final opener (and must deallocate)
+ * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
+ * 3. i_nlink > 0
+ *
+ * If the fs is read only, then we have to treat all cases as per #3
+ * since we are unable to do any deallocation. The inode will be
+ * deallocated by the next read/write node to attempt an allocation
+ * in the same resource group
+ *
* We have to (at the moment) hold the inodes main lock to cover
* the gap between unlocking the shared lock on the iopen lock and
* taking the exclusive lock. I'd rather do a shared -> exclusive
if (error)
goto out_truncate;
+ /* Case 1 starts here */
+
if (S_ISDIR(inode->i_mode) &&
(ip->i_diskflags & GFS2_DIF_EXHASH)) {
error = gfs2_dir_exhash_dealloc(ip);
goto out_unlock;
out_truncate:
+ /* Case 2 starts here */
error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
if (error)
goto out_unlock;
- gfs2_final_release_pages(ip);
+ /* Needs to be done before glock release & also in a transaction */
+ truncate_inode_pages(&inode->i_data, 0);
gfs2_trans_end(sdp);
out_unlock:
+ /* Error path for case 1 */
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
gfs2_glock_dq(&ip->i_iopen_gh);
gfs2_holder_uninit(&ip->i_iopen_gh);
if (error && error != GLR_TRYFAILED && error != -EROFS)
fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
+ /* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
rv = sscanf(buf, "%u", &first);
if (rv != 1 || first > 1)
return -EINVAL;
+ rv = wait_for_completion_killable(&sdp->sd_locking_init);
+ if (rv)
+ return rv;
spin_lock(&sdp->sd_jindex_spin);
rv = -EBUSY;
if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
rv = sscanf(buf, "%d", &jid);
if (rv != 1)
return -EINVAL;
-
+ rv = wait_for_completion_killable(&sdp->sd_locking_init);
+ if (rv)
+ return rv;
spin_lock(&sdp->sd_jindex_spin);
rv = -EINVAL;
if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
struct nameidata *nd)
{
- struct dentry *proc_dentry, *new, *parent;
+ struct dentry *proc_dentry, *parent;
+ struct qstr *name = &dentry->d_name;
struct inode *inode;
int err, deleted;
else if (deleted)
return ERR_PTR(-ENOENT);
- err = -ENOMEM;
parent = HPPFS_I(ino)->proc_dentry;
mutex_lock(&parent->d_inode->i_mutex);
- proc_dentry = d_lookup(parent, &dentry->d_name);
- if (proc_dentry == NULL) {
- proc_dentry = d_alloc(parent, &dentry->d_name);
- if (proc_dentry == NULL) {
- mutex_unlock(&parent->d_inode->i_mutex);
- goto out;
- }
- new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
- proc_dentry, NULL);
- if (new) {
- dput(proc_dentry);
- proc_dentry = new;
- }
- }
+ proc_dentry = lookup_one_len(name->name, parent, name->len);
mutex_unlock(&parent->d_inode->i_mutex);
if (IS_ERR(proc_dentry))
err = -ENOMEM;
inode = get_inode(ino->i_sb, proc_dentry);
if (!inode)
- goto out_dput;
+ goto out;
d_add(dentry, inode);
return NULL;
- out_dput:
- dput(proc_dentry);
out:
return ERR_PTR(err);
}
struct inode *proc_ino = dentry->d_inode;
struct inode *inode = new_inode(sb);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
return ERR_PTR(-ENOMEM);
+ }
if (S_ISDIR(dentry->d_inode->i_mode)) {
inode->i_op = &hppfs_dir_iops;
inode->i_fop = &hppfs_file_fops;
}
- HPPFS_I(inode)->proc_dentry = dget(dentry);
+ HPPFS_I(inode)->proc_dentry = dentry;
inode->i_uid = proc_ino->i_uid;
inode->i_gid = proc_ino->i_gid;
sb->s_fs_info = proc_mnt;
err = -ENOMEM;
- root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root);
+ root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root));
if (!root_inode)
goto out_mntput;
goto out;
attr->set_buf[size] = '\0';
- val = simple_strtol(attr->set_buf, NULL, 0);
+ val = simple_strtoll(attr->set_buf, NULL, 0);
ret = attr->set(attr->data, val);
if (ret == 0)
ret = len; /* on success, claim we got the whole input */
goto err_parent;
BUG_ON(nd->inode != parent->d_inode);
} else {
+ if (dentry->d_parent != parent)
+ goto err_parent;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (!__d_rcu_to_refcount(dentry, nd->seq))
goto err_child;
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
- *inode = path->dentry->d_inode;
if (unlikely(managed_dentry_might_block(path->dentry)))
return false;
path->mnt = mounted;
path->dentry = mounted->mnt_root;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+ /*
+ * Update the inode too. We don't need to re-check the
+ * dentry sequence number here after this d_inode read,
+ * because a mount-point is always pinned.
+ */
+ *inode = path->dentry->d_inode;
}
return true;
}
* this offset and save the original offset.
*/
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
- data->mds_offset = offset;
/* Perform an asynchronous write */
status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
#define encode_getfh_maxsz (op_encode_hdr_maxsz)
#define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \
((3+NFS4_FHSIZE) >> 2))
-#define nfs4_fattr_bitmap_maxsz 3
+#define nfs4_fattr_bitmap_maxsz 4
#define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
#define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
+ /* pnfs_set_layoutcommit needs this */
+ data->mds_offset = data->args.offset;
data->args.pgbase = req->wb_pgbase + offset;
data->args.pages = data->pagevec;
data->args.count = count;
lock_ufs(dir->i_sb);
ino = ufs_inode_by_name(dir, &dentry->d_name);
- if (ino) {
+ if (ino)
inode = ufs_iget(dir->i_sb, ino);
- if (IS_ERR(inode)) {
- unlock_ufs(dir->i_sb);
- return ERR_CAST(inode);
- }
- }
unlock_ufs(dir->i_sb);
- d_add(dentry, inode);
- return NULL;
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ return d_splice_alias(inode, dentry);
}
/*
struct acpi_device_power {
int state; /* Current state */
struct acpi_device_power_flags flags;
- struct acpi_device_power_state states[4]; /* Power states (D0-D3) */
+ struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
};
/* Performance Management */
/*
* Spinlock primitives
*/
+
+#ifndef acpi_os_create_lock
acpi_status
acpi_os_create_lock(acpi_spinlock *out_handle);
+#endif
void acpi_os_delete_lock(acpi_spinlock handle);
} while (0)
#endif
+/*
+ * When lockdep is enabled, the spin_lock_init() macro stringifies its
+ * argument and uses that as a name for the lock in debugging.
+ * By executing spin_lock_init() in a macro the key changes from "lock" for
+ * all locks to the name of the argument of acpi_os_create_lock(), which
+ * prevents lockdep from reporting false positives for ACPICA locks.
+ */
+#define acpi_os_create_lock(__handle) \
+({ \
+ spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+ \
+ if (lock) { \
+ *(__handle) = lock; \
+ spin_lock_init(*(__handle)); \
+ } \
+ lock ? AE_OK : AE_NO_MEMORY; \
+})
+
#endif /* __KERNEL__ */
#endif /* __ACLINUX_H__ */
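
The acpi_os_create_lock() macro above exists purely so that spin_lock_init() is expanded at the caller and lockdep picks up a distinct class name per call site rather than one shared "lock" key. The same stringification trick in a userspace setting (hypothetical helper, GNU statement-expression syntax as in the original):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct named_lock {
	pthread_mutex_t mutex;
	const char *name;	/* plays the role of the lockdep class name */
};

#define create_named_lock(handle)					\
({									\
	struct named_lock *_l = malloc(sizeof(*_l));			\
	if (_l) {							\
		pthread_mutex_init(&_l->mutex, NULL);			\
		_l->name = #handle;	/* stringified at the call site */ \
		*(handle) = _l;						\
	}								\
	_l ? 0 : -1;							\
})

int main(void)
{
	struct named_lock *acpi_gbl_gpe_lock;

	if (create_named_lock(&acpi_gbl_gpe_lock) == 0)
		printf("created lock named \"%s\"\n", acpi_gbl_gpe_lock->name);
	return 0;
}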
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
unsigned long long enhanced_area_offset; /* Units: Byte */
unsigned int enhanced_area_size; /* Units: KB */
unsigned int boot_size; /* in bytes */
+ u8 raw_partition_support; /* 160 */
+ u8 raw_erased_mem_count; /* 181 */
+ u8 raw_ext_csd_structure; /* 194 */
+ u8 raw_card_type; /* 196 */
+ u8 raw_s_a_timeout; /* 217 */
+ u8 raw_hc_erase_gap_size; /* 221 */
+ u8 raw_erase_timeout_mult; /* 223 */
+ u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_sec_trim_mult; /* 229 */
+ u8 raw_sec_erase_mult; /* 230 */
+ u8 raw_sec_feature_support;/* 231 */
+ u8 raw_trim_mult; /* 232 */
+ u8 raw_sectors[4]; /* 212 - 4 bytes */
};
struct sd_scr {
#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
NETIF_F_FSO)
-#define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
- NETIF_F_HIGHDMA | \
- NETIF_F_SCTP_CSUM | \
- NETIF_F_ALL_FCOE)
-
/*
* If one device supports one of these features, then enable them
* for all in netdev_increment_features.
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
enum powersavings_balance_level {
POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
return 0;
}
-struct sched_group {
- struct sched_group *next; /* Must be a circular list */
+struct sched_group_power {
atomic_t ref;
-
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
- unsigned int cpu_power, cpu_power_orig;
+ unsigned int power, power_orig;
+};
+
+struct sched_group {
+ struct sched_group *next; /* Must be a circular list */
+ atomic_t ref;
+
unsigned int group_weight;
+ struct sched_group_power *sgp;
/*
* The CPUs this group covers.
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+ int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
short Tb_max;
};
-#ifndef __KERNEL__
-
-void sdla(void *cfg_info, char *dev, struct frad_conf *conf, int quiet);
-
-#else
+#ifdef __KERNEL__
/* important Z80 window addresses */
#define SDLA_CONTROL_WND 0xE000
SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */
SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */
SCTP_CMD_TIMER_START, /* Start a timer. */
+ SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
SCTP_CMD_TIMER_STOP, /* Stop a timer. */
SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
void sctp_ulpevent_free(struct sctp_ulpevent *);
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
static struct rcu_state *rcu_state;
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned. So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier(). When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods. This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks. So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one. We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
#ifdef CONFIG_RCU_BOOST
/*
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
#endif /* #ifdef CONFIG_RCU_BOOST */
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
+ if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+ return;
if (likely(!rsp->boost)) {
rcu_do_batch(rsp, rdp);
return;
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;
+static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
struct rcu_data *rdp;
struct rcu_node *rnp;
- if (t->rcu_read_lock_nesting &&
+ if (t->rcu_read_lock_nesting > 0 &&
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
/* Possibly blocking in an RCU read-side critical section. */
rnp->gp_tasks = &t->rcu_node_entry;
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ } else if (t->rcu_read_lock_nesting < 0 &&
+ t->rcu_read_unlock_special) {
+
+ /*
+ * Complete exit from RCU read-side critical section on
+ * behalf of preempted instance of __rcu_read_unlock().
+ */
+ rcu_read_unlock_special(t);
}
/*
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
}
/* Hardware IRQ handlers cannot block. */
- if (in_irq()) {
+ if (in_irq() || in_serving_softirq()) {
local_irq_restore(flags);
return;
}
#ifdef CONFIG_RCU_BOOST
if (&t->rcu_node_entry == rnp->boost_tasks)
rnp->boost_tasks = np;
+ /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+ if (t->rcu_boosted) {
+ special |= RCU_READ_UNLOCK_BOOSTED;
+ t->rcu_boosted = 0;
+ }
#endif /* #ifdef CONFIG_RCU_BOOST */
t->rcu_blocked_node = NULL;
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
if (special & RCU_READ_UNLOCK_BOOSTED) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
rt_mutex_unlock(t->rcu_boost_mutex);
t->rcu_boost_mutex = NULL;
}
struct task_struct *t = current;
barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- --t->rcu_read_lock_nesting;
- barrier(); /* decrement before load of ->rcu_read_unlock_special */
- if (t->rcu_read_lock_nesting == 0 &&
- unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
+ if (t->rcu_read_lock_nesting != 1)
+ --t->rcu_read_lock_nesting;
+ else {
+ t->rcu_read_lock_nesting = INT_MIN;
+ barrier(); /* assign before ->rcu_read_unlock_special load */
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+ barrier(); /* ->rcu_read_unlock_special load before assign */
+ t->rcu_read_lock_nesting = 0;
+ }
#ifdef CONFIG_PROVE_LOCKING
- WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+ {
+ int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+ WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
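
__rcu_read_unlock() above now parks rcu_read_lock_nesting at INT_MIN while rcu_read_unlock_special() runs, so any critical section nested inside the slow path (from an interrupt, say) sees a negative count and takes the plain decrement branch instead of recursing. A single-threaded toy showing just that counter trick, with demo names and none of the barriers or per-task state:

#include <limits.h>
#include <stdio.h>

static __thread int nesting;		/* models t->rcu_read_lock_nesting */
static __thread int special_pending;	/* models t->rcu_read_unlock_special */

static void read_unlock_special(void);

static void demo_read_lock(void)
{
	nesting++;
}

static void demo_read_unlock(void)
{
	if (nesting != 1) {
		--nesting;
	} else {
		/* Park the count at INT_MIN while the slow path runs so any
		 * nested lock/unlock pair that fires meanwhile takes the
		 * plain ++/-- branch and cannot re-enter the slow path. */
		nesting = INT_MIN;
		if (special_pending)
			read_unlock_special();
		nesting = 0;
	}
}

static void read_unlock_special(void)
{
	special_pending = 0;
	demo_read_lock();	/* nested section sees nesting == INT_MIN + 1 */
	demo_read_unlock();	/* back to INT_MIN, slow path not re-entered */
	printf("special work done, nesting=%d (negative as expected)\n", nesting);
}

int main(void)
{
	demo_read_lock();
	special_pending = 1;
	demo_read_unlock();
	printf("final nesting=%d\n", nesting);
	return 0;
}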
rcu_preempt_qs(cpu);
return;
}
- if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+ if (t->rcu_read_lock_nesting > 0 &&
+ per_cpu(rcu_preempt_data, cpu).qs_pending)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
raw_spin_lock_irqsave(&rnp->lock, flags);
for (;;) {
- if (!sync_rcu_preempt_exp_done(rnp))
+ if (!sync_rcu_preempt_exp_done(rnp)) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
break;
+ }
if (rnp->parent == NULL) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
wake_up(&sync_rcu_preempt_exp_wq);
break;
}
raw_spin_lock(&rnp->lock); /* irqs already disabled */
rnp->expmask &= ~mask;
}
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&mtx, t);
t->rcu_boost_mutex = &mtx;
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+ t->rcu_boosted = 1;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
rnp->qsmaskinit == 0)
return 0;
if (rnp->node_kthread_task == NULL) {
int cpu;
struct rcu_node *rnp;
- rcu_kthreads_spawnable = 1;
+ rcu_scheduler_fully_active = 1;
for_each_possible_cpu(cpu) {
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu))
struct rcu_node *rnp = rdp->mynode;
/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
- if (rcu_kthreads_spawnable) {
+ if (rcu_scheduler_fully_active) {
(void)rcu_spawn_one_cpu_kthread(cpu);
if (rnp->node_kthread_task == NULL)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
{
}
+static int __init rcu_scheduler_really_started(void)
+{
+ rcu_scheduler_fully_active = 1;
+ return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}
}
#ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
{
struct rq *rq = this_rq();
- struct task_struct *list = xchg(&rq->wake_list, NULL);
-
- if (!list)
- return;
raw_spin_lock(&rq->lock);
raw_spin_unlock(&rq->lock);
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+ struct rq *rq = this_rq();
+ struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+ if (!list)
+ return;
+
+ sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
void scheduler_ipi(void)
{
- sched_ttwu_pending();
+ struct rq *rq = this_rq();
+ struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+ if (!list)
+ return;
+
+ /*
+ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+ * traditionally all their work was done from the interrupt return
+ * path. Now that we actually do some work, we need to make sure
+ * we do call them.
+ *
+ * Some archs already do call them, luckily irq_enter/exit nest
+ * properly.
+ *
+ * Arguably we should visit all archs and update all handlers,
+ * however, a fair share of IPIs are still resched-only, so this would
+ * somewhat pessimize the simple resched case.
+ */
+ irq_enter();
+ sched_ttwu_do_pending(list);
+ irq_exit();
}
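scheduler_ipi() above detaches the whole pending wake list with one xchg() and only then processes it inside irq_enter()/irq_exit(). A self-contained userspace sketch of that detach-the-whole-list pattern, using C11 atomics in place of the kernel's xchg(); struct wake_entry and the function names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wake_entry {
	struct wake_entry *next;
	int cpu;
};

static _Atomic(struct wake_entry *) wake_list;

/* Producer side: push one entry onto the lock-free list. */
static void push_wakeup(struct wake_entry *e)
{
	struct wake_entry *old = atomic_load(&wake_list);

	do {
		e->next = old;
	} while (!atomic_compare_exchange_weak(&wake_list, &old, e));
}

/* Consumer side: take everything queued so far in one atomic exchange. */
static void handle_pending_wakeups(void)
{
	struct wake_entry *list = atomic_exchange(&wake_list, NULL);

	while (list) {
		struct wake_entry *next = list->next;

		printf("waking entry for cpu %d\n", list->cpu);
		free(list);
		list = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct wake_entry *e = malloc(sizeof(*e));

		e->cpu = i;
		push_wakeup(e);
	}
	handle_pending_wakeups();
	return 0;
}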
static void ttwu_queue_remote(struct task_struct *p, int cpu)
break;
}
- if (!group->cpu_power) {
+ if (!group->sgp->power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
- if (group->cpu_power != SCHED_POWER_SCALE) {
+ if (group->sgp->power != SCHED_POWER_SCALE) {
printk(KERN_CONT " (cpu_power = %d)",
- group->cpu_power);
+ group->sgp->power);
}
group = group->next;
return rd;
}
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+ struct sched_group *tmp, *first;
+
+ if (!sg)
+ return;
+
+ first = sg;
+ do {
+ tmp = sg->next;
+
+ if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+ kfree(sg->sgp);
+
+ kfree(sg);
+ sg = tmp;
+ } while (sg != first);
+}
+
static void free_sched_domain(struct rcu_head *rcu)
{
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
- if (atomic_dec_and_test(&sd->groups->ref))
+
+ /*
+ * If it's an overlapping domain it has private groups; iterate and
+ * nuke them all.
+ */
+ if (sd->flags & SD_OVERLAP) {
+ free_sched_groups(sd->groups, 1);
+ } else if (atomic_dec_and_test(&sd->groups->ref)) {
+ kfree(sd->groups->sgp);
kfree(sd->groups);
+ }
kfree(sd);
}
struct sd_data {
struct sched_domain **__percpu sd;
struct sched_group **__percpu sg;
+ struct sched_group_power **__percpu sgp;
};
struct s_data {
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+#define SDTL_OVERLAP 0x01
+
struct sched_domain_topology_level {
sched_domain_init_f init;
sched_domain_mask_f mask;
+ int flags;
struct sd_data data;
};
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+ struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+ const struct cpumask *span = sched_domain_span(sd);
+ struct cpumask *covered = sched_domains_tmpmask;
+ struct sd_data *sdd = sd->private;
+ struct sched_domain *child;
+ int i;
+
+ cpumask_clear(covered);
+
+ for_each_cpu(i, span) {
+ struct cpumask *sg_span;
+
+ if (cpumask_test_cpu(i, covered))
+ continue;
+
+ sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, cpu_to_node(i));
+
+ if (!sg)
+ goto fail;
+
+ sg_span = sched_group_cpus(sg);
+
+ child = *per_cpu_ptr(sdd->sd, i);
+ if (child->child) {
+ child = child->child;
+ cpumask_copy(sg_span, sched_domain_span(child));
+ } else
+ cpumask_set_cpu(i, sg_span);
+
+ cpumask_or(covered, covered, sg_span);
+
+ sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+ atomic_inc(&sg->sgp->ref);
+
+ if (cpumask_test_cpu(cpu, sg_span))
+ groups = sg;
+
+ if (!first)
+ first = sg;
+ if (last)
+ last->next = sg;
+ last = sg;
+ last->next = first;
+ }
+ sd->groups = groups;
+
+ return 0;
+
+fail:
+ free_sched_groups(first, 0);
+
+ return -ENOMEM;
+}
+
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
if (child)
cpu = cpumask_first(sched_domain_span(child));
- if (sg)
+ if (sg) {
*sg = *per_cpu_ptr(sdd->sg, cpu);
+ (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+ atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+ }
return cpu;
}
/*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
* build_sched_groups will build a circular linked list of the groups
* covered by the given span, and will set each group's ->cpumask correctly,
* and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
*/
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
struct cpumask *covered;
int i;
+ get_group(cpu, sdd, &sd->groups);
+ atomic_inc(&sd->groups->ref);
+
+ if (cpu != cpumask_first(sched_domain_span(sd)))
+ return 0;
+
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
continue;
cpumask_clear(sched_group_cpus(sg));
- sg->cpu_power = 0;
+ sg->sgp->power = 0;
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
last = sg;
}
last->next = first;
+
+ return 0;
}
/*
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
- WARN_ON(!sd || !sd->groups);
+ struct sched_group *sg = sd->groups;
- if (cpu != group_first_cpu(sd->groups))
- return;
+ WARN_ON(!sd || !sg);
+
+ do {
+ sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+ sg = sg->next;
+ } while (sg != sd->groups);
- sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+ if (cpu != group_first_cpu(sg))
+ return;
update_group_power(sd, cpu);
}
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
- struct sched_group *sg = sd->groups;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
- if (cpu == cpumask_first(sched_group_cpus(sg))) {
- WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+ if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
- }
+
+ if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+ *per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
#endif
{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
- { sd_init_NODE, cpu_node_mask, },
+ { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
{ NULL, },
if (!sdd->sg)
return -ENOMEM;
+ sdd->sgp = alloc_percpu(struct sched_group_power *);
+ if (!sdd->sgp)
+ return -ENOMEM;
+
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
+ struct sched_group_power *sgp;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
return -ENOMEM;
*per_cpu_ptr(sdd->sg, j) = sg;
+
+ sgp = kzalloc_node(sizeof(struct sched_group_power),
+ GFP_KERNEL, cpu_to_node(j));
+ if (!sgp)
+ return -ENOMEM;
+
+ *per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
- kfree(*per_cpu_ptr(sdd->sd, j));
+ struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+ if (sd && (sd->flags & SD_OVERLAP))
+ free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sg, j));
+ kfree(*per_cpu_ptr(sdd->sgp, j));
}
free_percpu(sdd->sd);
free_percpu(sdd->sg);
+ free_percpu(sdd->sgp);
}
}
struct sched_domain_topology_level *tl;
sd = NULL;
- for (tl = sched_domain_topology; tl->init; tl++)
+ for (tl = sched_domain_topology; tl->init; tl++) {
sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+ if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+ sd->flags |= SD_OVERLAP;
+ if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+ break;
+ }
while (sd->child)
sd = sd->child;
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
- get_group(i, sd->private, &sd->groups);
- atomic_inc(&sd->groups->ref);
-
- if (i != cpumask_first(sched_domain_span(sd)))
- continue;
-
- build_sched_groups(sd);
+ if (sd->flags & SD_OVERLAP) {
+ if (build_overlap_sched_groups(sd, i))
+ goto error;
+ } else {
+ if (build_sched_groups(sd, i))
+ goto error;
+ }
}
}
#endif
#endif
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+ cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
}
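min_vruntime_copy, initialized above for !CONFIG_64BIT, lets 32-bit readers detect torn loads of the 64-bit value: the writer stores the value and then the copy with a barrier in between, and a reader retries until both agree. A compact userspace sketch of that protocol, with GCC's __atomic_thread_fence() standing in for smp_wmb()/smp_rmb(); struct clock64 and its helpers are illustrative names:

#include <stdint.h>
#include <stdio.h>

struct clock64 {
	uint64_t val;
	uint64_t copy;
};

static void clock64_write(struct clock64 *c, uint64_t v)
{
	c->val = v;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* value visible before copy */
	c->copy = v;
}

static uint64_t clock64_read(const struct clock64 *c)
{
	uint64_t v, cp;

	do {
		cp = c->copy;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		v = c->val;
	} while (v != cp);	/* mismatch means the writer was mid-update: retry */

	return v;
}

int main(void)
{
	struct clock64 c = { 0, 0 };

	clock64_write(&c, 123456789ULL);
	printf("%llu\n", (unsigned long long)clock64_read(&c));
	return 0;
}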
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
}
/* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+ avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
if (local_group) {
this_load = avg_load;
power >>= SCHED_POWER_SHIFT;
}
- sdg->cpu_power_orig = power;
+ sdg->sgp->power_orig = power;
if (sched_feat(ARCH_POWER))
power *= arch_scale_freq_power(sd, cpu);
power = 1;
cpu_rq(cpu)->cpu_power = power;
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
static void update_group_power(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- power += group->cpu_power;
+ power += group->sgp->power;
group = group->next;
} while (group != child->groups);
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
/*
/*
* If ~90% of the cpu_power is still there, we're good.
*/
- if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+ if (group->sgp->power * 32 > group->sgp->power_orig * 29)
return 1;
return 0;
}
/* Adjust by relative CPU power of the group */
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+ sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
/*
* Consider the group unbalanced when the imbalance is larger
if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
SCHED_POWER_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(sd, group);
return;
sds->total_load += sgs.group_load;
- sds->total_pwr += sg->cpu_power;
+ sds->total_pwr += sg->sgp->power;
/*
* In case the child domain prefers tasks go to siblings
if (this_cpu > busiest_cpu)
return 0;
- *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+ *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
SCHED_POWER_SCALE);
return 1;
}
scaled_busy_load_per_task = sds->busiest_load_per_task
* SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->cpu_power;
+ scaled_busy_load_per_task /= sds->busiest->sgp->power;
if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
(scaled_busy_load_per_task * imbn)) {
* moving them.
*/
- pwr_now += sds->busiest->cpu_power *
+ pwr_now += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->cpu_power *
+ pwr_now += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load);
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->cpu_power;
+ sds->busiest->sgp->power;
if (sds->max_load > tmp)
- pwr_move += sds->busiest->cpu_power *
+ pwr_move += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load - tmp);
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->cpu_power <
+ if (sds->max_load * sds->busiest->sgp->power <
sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->cpu_power) /
- sds->this->cpu_power;
+ tmp = (sds->max_load * sds->busiest->sgp->power) /
+ sds->this->sgp->power;
else
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->cpu_power;
- pwr_move += sds->this->cpu_power *
+ sds->this->sgp->power;
+ pwr_move += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load + tmp);
pwr_move /= SCHED_POWER_SCALE;
load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
- load_above_capacity /= sds->busiest->cpu_power;
+ load_above_capacity /= sds->busiest->sgp->power;
}
/*
max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds->busiest->cpu_power,
- (sds->avg_load - sds->this_load) * sds->this->cpu_power)
+ *imbalance = min(max_pull * sds->busiest->sgp->power,
+ (sds->avg_load - sds->this_load) * sds->this->sgp->power)
/ SCHED_POWER_SCALE;
/*
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)
{
struct sighand_struct *sighand;
- rcu_read_lock();
for (;;) {
+ local_irq_save(*flags);
+ rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL))
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+ local_irq_restore(*flags);
break;
+ }
- spin_lock_irqsave(&sighand->siglock, *flags);
- if (likely(sighand == tsk->sighand))
+ spin_lock(&sighand->siglock);
+ if (likely(sighand == tsk->sighand)) {
+ rcu_read_unlock();
break;
- spin_unlock_irqrestore(&sighand->siglock, *flags);
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+ local_irq_restore(*flags);
}
- rcu_read_unlock();
return sighand;
}
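The lock_task_sighand() rework above is an instance of the usual lock-then-revalidate retry pattern: dereference a pointer that may be swapped concurrently, take its lock, and keep it only if the pointer still matches. A userspace sketch of just that pattern, omitting the RCU and irq-disable parts; struct handler, struct owner and lock_owner_handler are illustrative names:

#include <pthread.h>
#include <stddef.h>

struct handler {
	pthread_mutex_t lock;
};

struct owner {
	struct handler *h;	/* may be replaced or cleared concurrently */
};

/* Return the current handler with its lock held, or NULL if it is gone. */
static struct handler *lock_owner_handler(struct owner *o)
{
	struct handler *h;

	for (;;) {
		h = __atomic_load_n(&o->h, __ATOMIC_ACQUIRE);
		if (h == NULL)
			return NULL;

		pthread_mutex_lock(&h->lock);
		if (h == __atomic_load_n(&o->h, __ATOMIC_ACQUIRE))
			return h;	/* still current: keep the lock */

		/* lost a race with a swap: drop the stale lock and retry */
		pthread_mutex_unlock(&h->lock);
	}
}

int main(void)
{
	struct handler h = { PTHREAD_MUTEX_INITIALIZER };
	struct owner o = { &h };
	struct handler *locked = lock_owner_handler(&o);

	if (locked)
		pthread_mutex_unlock(&locked->lock);
	return 0;
}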
{
if (!force_irqthreads)
__do_softirq();
- else
+ else {
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
}
#else
static inline void invoke_softirq(void)
{
if (!force_irqthreads)
do_softirq();
- else
+ else {
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
}
#endif
for (i = 0; i <= classzone_idx; i++)
present_pages += pgdat->node_zones[i].present_pages;
- return balanced_pages > (present_pages >> 2);
+ /* Special case: a zone with no pages is considered balanced */
+ return balanced_pages >= (present_pages >> 2);
}
/* is kswapd sleeping prematurely? */
(1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT);
- dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+ dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+ NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+ NETIF_F_ALL_FCOE;
+
dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
hci_dev_put(hdev);
+ if (conn->handle == 0)
+ kfree(conn);
+
return 0;
}
{
struct hidp_session *session = (struct hidp_session *) arg;
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
}
static void hidp_set_timer(struct hidp_session *session)
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(current);
}
}
add_wait_queue(sk_sleep(intr_sk), &intr_wait);
session->waiting_for_startup = 0;
wake_up_interruptible(&session->startup_queue);
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
-
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!atomic_read(&session->terminate)) {
if (ctrl_sk->sk_state != BT_CONNECTED ||
intr_sk->sk_state != BT_CONNECTED)
break;
hidp_process_transmit(session);
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
err_add_device:
hid_destroy_device(session->hid);
session->hid = NULL;
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
unlink:
hidp_del_timer(session);
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
}
} else
err = -ENOENT;
uint ctrl_mtu;
uint intr_mtu;
+ atomic_t terminate;
struct task_struct *task;
unsigned char keys[8];
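The hidp changes above replace the blocking kthread_stop() with a terminate flag plus wake_up_process(), and move set_current_state(TASK_INTERRUPTIBLE) ahead of the flag check so a wakeup cannot be lost. A userspace sketch of the same shutdown protocol with pthreads, where a condvar stands in for set_current_state()/schedule(); all names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static atomic_int terminate;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!atomic_load(&terminate)) {
		/* ... process any queued work here ... */
		pthread_cond_wait(&wake, &lock);	/* sleep until woken */
	}
	pthread_mutex_unlock(&lock);
	printf("worker: terminate requested, exiting\n");
	return NULL;
}

static void request_stop(void)
{
	atomic_store(&terminate, 1);	/* like atomic_inc(&session->terminate) */
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&wake);	/* like wake_up_process(session->task) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);
	request_stop();
	pthread_join(t, NULL);
	return 0;
}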
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
- parent->sk_data_ready(parent, 0);
+ if (parent)
+ parent->sk_data_ready(parent, 0);
} else {
sk->sk_state = BT_CONFIG;
sk = chan->sk;
- if (sk->sk_state != BT_CONFIG) {
+ if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
struct l2cap_cmd_rej rej;
rej.reason = cpu_to_le16(0x0002);
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
- if (chan->conf_len + len > sizeof(chan->conf_req)) {
+ if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
L2CAP_CONF_REJECT, flags), rsp);
struct sock *parent = bt_sk(sk)->parent;
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
- parent->sk_data_ready(parent, 0);
+ if (parent)
+ parent->sk_data_ready(parent, 0);
} else {
sk->sk_state = BT_CONFIG;
res = L2CAP_CR_SUCCESS;
if ((flags & O_DIRECTORY) == O_DIRECTORY)
return CEPH_FILE_MODE_PIN;
#endif
- if ((flags & O_APPEND) == O_APPEND)
- flags |= O_WRONLY;
- if ((flags & O_ACCMODE) == O_RDWR)
- mode = CEPH_FILE_MODE_RDWR;
- else if ((flags & O_ACCMODE) == O_WRONLY)
+ switch (flags & O_ACCMODE) {
+ case O_WRONLY:
mode = CEPH_FILE_MODE_WR;
- else
+ break;
+ case O_RDONLY:
mode = CEPH_FILE_MODE_RD;
-
+ break;
+ case O_RDWR:
+ case O_ACCMODE: /* this is what the VFS does */
+ mode = CEPH_FILE_MODE_RDWR;
+ break;
+ }
#ifdef O_LAZY
if (flags & O_LAZY)
mode |= CEPH_FILE_MODE_LAZY;
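ceph_flags_to_mode() above now switches on flags & O_ACCMODE instead of testing O_WRONLY and O_RDWR as if they were independent bits (they are enumerated values, and the VFS can also pass the value O_ACCMODE itself). A small userspace sketch of the same accmode handling; enum file_mode and flags_to_mode are illustrative names:

#include <fcntl.h>
#include <stdio.h>

enum file_mode { MODE_RD, MODE_WR, MODE_RDWR };

static enum file_mode flags_to_mode(int flags)
{
	switch (flags & O_ACCMODE) {
	case O_WRONLY:
		return MODE_WR;
	case O_RDONLY:
		return MODE_RD;
	case O_RDWR:
	case O_ACCMODE:	/* == O_RDWR | O_WRONLY on Linux; the VFS can pass this */
		return MODE_RDWR;
	}
	return MODE_RD;	/* unreachable on Linux; silences compiler warnings */
}

int main(void)
{
	printf("O_WRONLY|O_APPEND -> %d\n", flags_to_mode(O_WRONLY | O_APPEND));
	return 0;
}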
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
local->sched_scan_ies.ie[i] = kzalloc(2 +
IEEE80211_MAX_SSID_LEN +
- local->scan_ies_len,
+ local->scan_ies_len +
+ req->ie_len,
GFP_KERNEL);
if (!local->sched_scan_ies.ie[i]) {
ret = -ENOMEM;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int queue = rx->queue;
+
+ /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+ if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+ queue = 0;
/*
* it makes no sense to check for MIC errors on anything other
update_iv:
/* update IV in key information to be able to detect replays */
- rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
- rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+ rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+ rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
return RX_CONTINUE;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ int queue = rx->queue;
+
+ /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+ if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+ queue = 0;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
- hdr->addr1, hwaccel, rx->queue,
+ hdr->addr1, hwaccel, queue,
&rx->tkip_iv32,
&rx->tkip_iv16);
if (res != TKIP_DECRYPT_OK)
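The TKIP RX hunks above fold the last RX data queue, i.e. the pseudo-queue used for non-QoS frames, onto queue 0 so both share one replay counter; otherwise a frame captured on TID 0 could be replayed as non-QoS and vice versa. A sketch of that remapping; NUM_RX_DATA_QUEUES, struct tkip_rx_state and the helpers are illustrative here:

#include <stdio.h>

#define NUM_RX_DATA_QUEUES 17

struct tkip_rx_state {
	unsigned int iv32;
	unsigned short iv16;
};

static struct tkip_rx_state rx_state[NUM_RX_DATA_QUEUES];

/* The non-QoS pseudo-queue shares TID 0's IV state. */
static int replay_queue(int rx_queue)
{
	return rx_queue == NUM_RX_DATA_QUEUES - 1 ? 0 : rx_queue;
}

static void update_iv(int rx_queue, unsigned int iv32, unsigned short iv16)
{
	int queue = replay_queue(rx_queue);

	rx_state[queue].iv32 = iv32;
	rx_state[queue].iv16 = iv16;
}

int main(void)
{
	update_iv(NUM_RX_DATA_QUEUES - 1, 42, 7);	/* non-QoS frame */
	printf("queue 0 iv32=%u iv16=%u\n", rx_state[0].iv32, rx_state[0].iv16);
	return 0;
}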
* Note: Adler-32 is no longer applicable, as it has been replaced
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/
- if (!sctp_checksum_disable &&
- !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
- __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+ if (!sctp_checksum_disable) {
+ if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+ __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
- /* 3) Put the resultant value into the checksum field in the
- * common header, and leave the rest of the bits unchanged.
- */
- sh->checksum = sctp_end_cksum(crc32);
- } else {
- if (dst->dev->features & NETIF_F_SCTP_CSUM) {
+ /* 3) Put the resultant value into the checksum field in the
+ * common header, and leave the rest of the bits unchanged.
+ */
+ sh->checksum = sctp_end_cksum(crc32);
+ } else {
/* no need to seed pseudo checksum for SCTP */
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (skb_transport_header(nskb) -
nskb->head);
nskb->csum_offset = offsetof(struct sctphdr, checksum);
- } else {
- nskb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
#endif /* SCTP_DEBUG */
if (transport) {
if (bytes_acked) {
+ struct sctp_association *asoc = transport->asoc;
+
/* We may have counted DATA that was migrated
* to this transport due to DEL-IP operation.
* Subtract those bytes, since they were never
transport->error_count = 0;
transport->asoc->overall_error_count = 0;
+ /*
+ * While in SHUTDOWN PENDING, we may have started
+ * the T5 shutdown guard timer after reaching the
+ * retransmission limit. Stop that timer as soon
+ * as the receiver acknowledges any data.
+ */
+ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+ del_timer(&asoc->timers
+ [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+ sctp_association_put(asoc);
+
/* Mark the destination transport address as
* active if it is not so marked.
*/
* A sender is doing zero window probing when the
* receiver's advertised window is zero, and there is
* only one data chunk in flight to the receiver.
+ *
+ * Allow the association to time out while in SHUTDOWN
+ * PENDING or SHUTDOWN RECEIVED in case the receiver
+ * stays in zero window mode forever.
*/
if (!q->asoc->peer.rwnd &&
!list_empty(&tlist) &&
- (sack_ctsn+2 == q->asoc->next_tsn)) {
+ (sack_ctsn+2 == q->asoc->next_tsn) &&
+ q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
SCTP_DEBUG_PRINTK("%s: SACK received for zero "
"window probe: %u\n",
__func__, sack_ctsn);
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
* HEARTBEAT should clear the error counter of the destination
* transport address to which the HEARTBEAT was sent.
- * The association's overall error count is also cleared.
*/
t->error_count = 0;
- t->asoc->overall_error_count = 0;
+
+ /*
+ * Although RFC4960 specifies that the overall error count must
+ * be cleared when a HEARTBEAT ACK is received, we make an
+ * exception while in SHUTDOWN PENDING. If the peer keeps its
+ * window shut forever, we may never be able to transmit our
+ * outstanding data and must rely on the retransmission limit being
+ * reached to shut down the association.
+ */
+ if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ t->asoc->overall_error_count = 0;
/* Clear the hb_sent flag to signal that we had a good
* acknowledgement.
sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
break;
+ case SCTP_CMD_TIMER_START_ONCE:
+ timer = &asoc->timers[cmd->obj.to];
+
+ if (timer_pending(timer))
+ break;
+ /* fall through */
+
case SCTP_CMD_TIMER_START:
timer = &asoc->timers[cmd->obj.to];
timeout = asoc->timeouts[cmd->obj.to];
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
if (asoc->autoclose)
SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
if (asoc->overall_error_count >= asoc->max_retrans) {
- sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
- SCTP_ERROR(ETIMEDOUT));
- /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_PERR(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
- return SCTP_DISPOSITION_DELETE_TCB;
+ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+ /*
+ * We are here likely because the receiver had its rwnd
+ * closed for a while and we have not been able to
+ * transmit the locally queued data within the maximum
+ * retransmission attempts limit. Start the T5
+ * shutdown guard timer to give the receiver one last
+ * chance and some additional time to recover before
+ * aborting.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
}
/* E1) For the destination address for which the timer
/* SCTP_STATE_ESTABLISHED */ \
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
- TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
/* SCTP_STATE_SHUTDOWN_SENT */ \
TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct list_head *pos, *temp;
+ unsigned int data_was_unread;
SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
ep = sctp_sk(sk)->ep;
+ /* Clean up any skbs sitting on the receive queue. */
+ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+ data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
/* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
}
}
- if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+ if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+ !skb_queue_empty(&asoc->ulpq.reasm) ||
+ (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, NULL, 0);
sctp_primitive_SHUTDOWN(asoc, NULL);
}
- /* Clean up any skbs sitting on the receive queue. */
- sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
- sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
/* On a TCP-style socket, block for at most linger_time if set. */
if (sctp_style(sk, TCP) && timeout)
sctp_wait_for_close(sk, timeout);
}
/* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(list)) != NULL)
- sctp_ulpevent_free(sctp_skb2event(skb));
+ unsigned int data_unread = 0;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ if (!sctp_ulpevent_is_notification(event))
+ data_unread += skb->len;
+
+ sctp_ulpevent_free(event);
+ }
+
+ return data_unread;
}
u32 bind_version;
struct rpc_xprt *xprt;
struct rpc_clnt *rpcb_clnt;
- static struct rpcbind_args *map;
+ struct rpcbind_args *map;
struct rpc_task *child;
struct sockaddr_storage addr;
struct sockaddr *sap = (struct sockaddr *)&addr;
BUG_ON(RPC_IS_QUEUED(task));
for (;;) {
+ void (*do_action)(struct rpc_task *);
/*
- * Execute any pending callback.
+ * Execute any pending callback first.
*/
- if (task->tk_callback) {
- void (*save_callback)(struct rpc_task *);
-
- /*
- * We set tk_callback to NULL before calling it,
- * in case it sets the tk_callback field itself:
- */
- save_callback = task->tk_callback;
- task->tk_callback = NULL;
- save_callback(task);
- } else {
+ do_action = task->tk_callback;
+ task->tk_callback = NULL;
+ if (do_action == NULL) {
/*
* Perform the next FSM step.
- * tk_action may be NULL when the task has been killed
- * by someone else.
+ * tk_action may be NULL if the task has been killed.
+ * In particular, note that rpc_killall_tasks may
+ * do this at any time, so beware when dereferencing.
*/
- if (task->tk_action == NULL)
+ do_action = task->tk_action;
+ if (do_action == NULL)
break;
- task->tk_action(task);
}
+ do_action(task);
/*
* Lockless check for whether task is sleeping or not.
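The __rpc_execute() rework above snapshots tk_callback, and failing that tk_action, into a local do_action before testing and calling it, because rpc_killall_tasks() may clear tk_action at any time. A userspace sketch of that control flow only (the kernel's own barriers handle the concurrent case; struct task, run_one_step and say_hello are illustrative names):

#include <stddef.h>
#include <stdio.h>

struct task {
	void (*callback)(struct task *);
	void (*action)(struct task *);
};

static void run_one_step(struct task *t)
{
	void (*do_action)(struct task *);

	/* A pending callback takes precedence; clear it before running it. */
	do_action = t->callback;
	t->callback = NULL;
	if (do_action == NULL) {
		/* Read the action exactly once before the NULL check and call. */
		do_action = t->action;
		if (do_action == NULL)
			return;
	}
	do_action(t);
}

static void say_hello(struct task *t)
{
	(void)t;
	printf("action ran\n");
}

int main(void)
{
	struct task t = { .callback = NULL, .action = say_hello };

	run_one_step(&t);
	return 0;
}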
mutex_init(&rdev->mtx);
mutex_init(&rdev->devlist_mtx);
+ mutex_init(&rdev->sched_scan_mtx);
INIT_LIST_HEAD(&rdev->netdev_list);
spin_lock_init(&rdev->bss_lock);
INIT_LIST_HEAD(&rdev->bss_list);
rfkill_destroy(rdev->rfkill);
mutex_destroy(&rdev->mtx);
mutex_destroy(&rdev->devlist_mtx);
+ mutex_destroy(&rdev->sched_scan_mtx);
list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&scan->pub);
cfg80211_rdev_free_wowlan(rdev);
___cfg80211_scan_done(rdev, true);
}
+ cfg80211_unlock_rdev(rdev);
+
+ mutex_lock(&rdev->sched_scan_mtx);
+
if (WARN_ON(rdev->sched_scan_req &&
rdev->sched_scan_req->dev == wdev->netdev)) {
__cfg80211_stop_sched_scan(rdev, false);
}
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
mutex_lock(&rdev->devlist_mtx);
rdev->opencount--;
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->sched_scan_mtx);
__cfg80211_stop_sched_scan(rdev, false);
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
wdev_lock(wdev);
#ifdef CONFIG_CFG80211_WEXT
struct work_struct scan_done_wk;
struct work_struct sched_scan_results_wk;
+ struct mutex sched_scan_mtx;
+
#ifdef CONFIG_NL80211_TESTMODE
struct genl_info *testmode_info;
#endif
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
- if (rdev->sched_scan_req)
- return -EINPROGRESS;
-
if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
return -EINVAL;
if (ie_len > wiphy->max_scan_ie_len)
return -EINVAL;
+ mutex_lock(&rdev->sched_scan_mtx);
+
+ if (rdev->sched_scan_req) {
+ err = -EINPROGRESS;
+ goto out;
+ }
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
- if (!request)
- return -ENOMEM;
+ if (!request) {
+ err = -ENOMEM;
+ goto out;
+ }
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
out_free:
kfree(request);
out:
+ mutex_unlock(&rdev->sched_scan_mtx);
return err;
}
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int err;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_stop)
return -EOPNOTSUPP;
- return __cfg80211_stop_sched_scan(rdev, false);
+ mutex_lock(&rdev->sched_scan_mtx);
+ err = __cfg80211_stop_sched_scan(rdev, false);
+ mutex_unlock(&rdev->sched_scan_mtx);
+
+ return err;
}
static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
rdev = container_of(wk, struct cfg80211_registered_device,
sched_scan_results_wk);
- cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->sched_scan_mtx);
/* we don't have sched_scan_req anymore if the scan is stopping */
if (rdev->sched_scan_req)
nl80211_send_sched_scan_results(rdev,
rdev->sched_scan_req->dev);
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
}
void cfg80211_sched_scan_results(struct wiphy *wiphy)
{
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->sched_scan_mtx);
__cfg80211_stop_sched_scan(rdev, true);
- cfg80211_unlock_rdev(rdev);
+ mutex_unlock(&rdev->sched_scan_mtx);
}
EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
int err;
struct net_device *dev;
- ASSERT_RDEV_LOCK(rdev);
+ lockdep_assert_held(&rdev->sched_scan_mtx);
if (!rdev->sched_scan_req)
return 0;
xfrm_state_check_expire(x1);
err = 0;
+ x->km.state = XFRM_STATE_DEAD;
+ __xfrm_state_put(x);
}
spin_unlock_bh(&x1->lock);
# older versions of depmod require the version string to start with three
# numbers, so we cheat with a symlink here
depmod_hack_needed=true
-mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
-if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
- if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
- -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX)
+mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE"
+if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then
+ if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \
+ -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then
depmod_hack_needed=false
fi
fi
+rm -rf "$tmp_dir"
if $depmod_hack_needed; then
symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
ln -s "$KERNELRELEASE" "$symlink"
SND_SOC_DAPM_INPUT("DMIC2DAT"),
SND_SOC_DAPM_INPUT("Clock"),
-SND_SOC_DAPM_MICBIAS("MICBIAS", WM8994_MICBIAS, 2, 0),
SND_SOC_DAPM_SUPPLY_S("MICBIAS Supply", 1, SND_SOC_NOPM, 0, 0, micbias_ev,
SND_SOC_DAPM_PRE_PMU),
{ "AIF2DACDAT", NULL, "AIF1DACDAT" },
{ "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
{ "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
- { "MICBIAS", NULL, "CLK_SYS" },
- { "MICBIAS", NULL, "MICBIAS Supply" },
+ { "MICBIAS1", NULL, "CLK_SYS" },
+ { "MICBIAS1", NULL, "MICBIAS Supply" },
+ { "MICBIAS2", NULL, "CLK_SYS" },
+ { "MICBIAS2", NULL, "MICBIAS Supply" },
};
static const struct snd_soc_dapm_route wm8994_intercon[] = {
report = SND_JACK_MICROPHONE;
/* Everything else is buttons; just assign slots */
- if (status & 0x1c0)
+ if (status & 0x1c)
report |= SND_JACK_BTN_0;
done:
static struct fsi_ak4642_data fsi_a_ak4642 = {
.name = "AK4642",
- .card = "FSIA (AK4642)",
+ .card = "FSIA-AK4642",
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi.0",
static struct fsi_ak4642_data fsi_b_ak4642 = {
.name = "AK4642",
- .card = "FSIB (AK4642)",
+ .card = "FSIB-AK4642",
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi.0",
static struct fsi_ak4642_data fsi_a_ak4643 = {
.name = "AK4643",
- .card = "FSIA (AK4643)",
+ .card = "FSIA-AK4643",
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi.0",
static struct fsi_ak4642_data fsi_b_ak4643 = {
.name = "AK4643",
- .card = "FSIB (AK4643)",
+ .card = "FSIB-AK4643",
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi.0",
static struct fsi_ak4642_data fsi2_a_ak4642 = {
.name = "AK4642",
- .card = "FSI2A (AK4642)",
+ .card = "FSI2A-AK4642",
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi2",
static struct fsi_ak4642_data fsi2_b_ak4642 = {
.name = "AK4642",
- .card = "FSI2B (AK4642)",
+ .card = "FSI2B-AK4642",
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0012",
.platform = "sh_fsi2",
static struct fsi_ak4642_data fsi2_a_ak4643 = {
.name = "AK4643",
- .card = "FSI2A (AK4643)",
+ .card = "FSI2A-AK4643",
.cpu_dai = "fsia-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi2",
static struct fsi_ak4642_data fsi2_b_ak4643 = {
.name = "AK4643",
- .card = "FSI2B (AK4643)",
+ .card = "FSI2B-AK4643",
.cpu_dai = "fsib-dai",
.codec = "ak4642-codec.0-0013",
.platform = "sh_fsi2",
};
static struct snd_soc_card fsi_soc_card = {
- .name = "FSI (DA7210)",
+ .name = "FSI-DA7210",
.dai_link = &fsi_da7210_dai,
.num_links = 1,
};
static struct fsi_hdmi_data fsi2_a_hdmi = {
.cpu_dai = "fsia-dai",
- .card = "FSI2A (SH MOBILE HDMI)",
+ .card = "FSI2A-HDMI",
.id = FSI_PORT_A,
};
static struct fsi_hdmi_data fsi2_b_hdmi = {
.cpu_dai = "fsib-dai",
- .card = "FSI2B (SH MOBILE HDMI)",
+ .card = "FSI2B-HDMI",
.id = FSI_PORT_B,
};