Merge branch 'alloc_path' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh...
author    Chris Mason <chris.mason@oracle.com>
          Mon, 1 Aug 2011 18:27:34 +0000 (14:27 -0400)
committer Chris Mason <chris.mason@oracle.com>
          Mon, 1 Aug 2011 18:27:34 +0000 (14:27 -0400)
158 files changed:
Documentation/filesystems/nilfs2.txt
Documentation/networking/ip-sysctl.txt
Documentation/x86/boot.txt
Makefile
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/gpio.c
arch/arm/mach-davinci/irq.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-s3c64xx/dma.c
arch/arm/plat-s3c24xx/dma.c
arch/arm/plat-samsung/dma.c
arch/arm/plat-samsung/include/plat/dma.h
arch/arm/plat-samsung/irq-uart.c
arch/arm/plat-samsung/irq-vic-timer.c
arch/mips/kernel/i8259.c
arch/sparc/include/asm/irqflags_32.h
arch/sparc/include/asm/irqflags_64.h
arch/sparc/include/asm/ptrace.h
arch/sparc/kernel/entry.S
arch/sparc/mm/leon_mm.c
arch/x86/Kconfig
arch/x86/kernel/reboot.c
drivers/acpi/apei/hest.c
drivers/acpi/osl.c
drivers/char/agp/intel-agp.h
drivers/gpio/wm831x-gpio.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/gpu/drm/radeon/rs600.c
drivers/hwmon/adm1275.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/it87.c
drivers/hwmon/max1111.c
drivers/hwmon/pmbus_core.c
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/radio/Kconfig
drivers/media/radio/si4713-i2c.c
drivers/media/rc/mceusb.c
drivers/media/rc/nuvoton-cir.c
drivers/media/video/cx23885/cx23885-core.c
drivers/media/video/tuner-core.c
drivers/mmc/core/mmc.c
drivers/net/bonding/bond_main.c
drivers/net/gianfar.c
drivers/net/gianfar.h
drivers/net/natsemi.c
drivers/net/pppoe.c
drivers/net/r6040.c
drivers/net/slip.c
drivers/net/tulip/dmfe.c
drivers/net/usb/hso.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/sysfs.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/ssb/driver_pcicore.c
drivers/watchdog/Kconfig
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/dir-item.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/locking.c
fs/btrfs/locking.h
fs/btrfs/relocation.c
fs/btrfs/struct-funcs.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/xattr.c
fs/ceph/mds_client.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/sess.c
fs/cramfs/inode.c
fs/dcache.c
fs/exofs/super.c
fs/fscache/page.c
fs/gfs2/aops.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/log.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/sys.c
fs/hppfs/hppfs.c
fs/libfs.c
fs/namei.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4xdr.c
fs/nfs/write.c
fs/ufs/namei.c
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/platform/aclinux.h
include/drm/drm_pciids.h
include/linux/mmc/card.h
include/linux/netdevice.h
include/linux/sched.h
include/linux/sdla.h
include/net/sctp/command.h
include/net/sctp/ulpevent.h
kernel/rcutree.c
kernel/rcutree_plugin.h
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/signal.c
kernel/softirq.c
mm/vmscan.c
net/8021q/vlan_dev.c
net/bluetooth/hci_conn.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/hidp.h
net/bluetooth/l2cap_core.c
net/ceph/ceph_fs.c
net/mac80211/scan.c
net/mac80211/wpa.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/ulpevent.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/sched.c
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_state.c
scripts/depmod.sh
sound/soc/codecs/wm8994.c
sound/soc/sh/fsi-ak4642.c
sound/soc/sh/fsi-da7210.c
sound/soc/sh/fsi-hdmi.c

diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index d5c0cef38a7122ed378371acba045f928f891c0b..873a2ab2e9f8801aee72a11833ac4bfa24fa3d84 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -40,7 +40,6 @@ Features which NILFS2 does not support yet:
        - POSIX ACLs
        - quotas
        - fsck
-       - resize
        - defragmentation
 
 Mount options
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d3d653a5f9b923be1ab518cba040e9ccb3868f3e..bfe924217f246a8c0a10846e3a034201a9b9095a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
        when RTO retransmissions remain unacknowledged.
        See tcp_retries2 for more details.
 
-       The default value is 7.
+       The default value is 8.
        If your machine is a loaded WEB server,
        you should think about lowering this value; such sockets
        may consume significant resources. Cf. tcp_max_orphans.
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 9b7221a86df291e6c651e66a3bebc41c69256633..7c3a8801b7ce0c20b3395022d937427405f138dd 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -674,7 +674,7 @@ Protocol:   2.10+
 
 Field name:    init_size
 Type:          read
-Offset/size:   0x25c/4
+Offset/size:   0x260/4
 
   This field indicates the amount of linear contiguous memory starting
   at the kernel runtime start address that the kernel needs before it
diff --git a/Makefile b/Makefile
index 60d91f76c2fd1c94af79dd994d79d46a131c6cbd..6a5bdad524affe34c62141ae9678da115fcc28ee 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index c67f684ee3e58e219a182c55624e902e069e666b..09a87e61ffcf451dfe76ab10e6fd1ba58c5a3834 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -520,7 +520,7 @@ fail:
         */
        if (have_imager()) {
                label = "HD imager";
-               mux |= 1;
+               mux |= 2;
 
                /* externally mux MMC1/ENET/AIC33 to imager */
                mux |= BIT(6) | BIT(5) | BIT(3);
@@ -540,7 +540,7 @@ fail:
                resets &= ~BIT(1);
 
                if (have_tvp7002()) {
-                       mux |= 2;
+                       mux |= 1;
                        resets &= ~BIT(2);
                        label = "tvp7002 HD";
                } else {
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index e7221398e5af9c751d1dc8f85c1d294b2afb0f9b..cafbe13a82a5c5bc56e24bf60fd37fae1d324c4c 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
        struct davinci_gpio_regs __iomem *g;
        u32 mask = 0xffff;
+       struct davinci_gpio_controller *d;
 
-       g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
+       d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
+       g = (struct davinci_gpio_regs __iomem *)d->regs;
 
        /* we only care about one bank */
        if (irq & 1)
@@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
                if (!status)
                        break;
                __raw_writel(status, &g->intstat);
-               if (irq & 1)
-                       status >>= 16;
 
                /* now demux them to the right lowlevel handler */
-               n = (int)irq_get_handler_data(irq);
+               n = d->irq_base;
+               if (irq & 1) {
+                       n += 16;
+                       status >>= 16;
+               }
+
                while (status) {
                        res = ffs(status);
                        n += res;
@@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void)
 
                /* set up all irqs in this bank */
                irq_set_chained_handler(bank_irq, gpio_irq_handler);
-               irq_set_handler_data(bank_irq, (__force void *)g);
+
+               /*
+                * Each chip handles 32 gpios, and each irq bank consists of 16
+                * gpio irqs. Pass the irq bank's corresponding controller to
+                * the chained irq handler.
+                */
+               irq_set_handler_data(bank_irq, &chips[gpio / 32]);
 
                for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
                        irq_set_chip(irq, &gpio_irqchip);
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index d8c1af02593100e586d0fd39cb25cac71dc0a21d..952dc126c390cca25e1b33e838cbc282ab501ccc 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -52,6 +52,12 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        struct irq_chip_type *ct;
 
        gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
+       if (!gc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+                      __func__, irq_start);
+               return;
+       }
+
        ct = gc->chip_types;
        ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 74ed81a3cb1a7ae0ca7ca97d7ba3f41e4509c150..07772575d7ab22d3205238003322487cd9fead2b 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void)
 /*
  * clocksource
  */
+
+static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+{
+       return *IXP4XX_OSTS;
+}
+
 unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
        init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
 
-       clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32,
-                       clocksource_mmio_readl_up);
+       clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
+                       ixp4xx_clocksource_read);
 }
 
 /*
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index b197171e7d03c8219c34a126e90c78f13501a6d1..204bfafe4bfc214677e60fedbef98e4f43a5dd93 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -113,7 +113,7 @@ found:
        return chan;
 }
 
-int s3c2410_dma_config(unsigned int channel, int xferunit)
+int s3c2410_dma_config(enum dma_ch channel, int xferunit)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
        return 0;
 }
 
-int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  *
  */
 
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                        dma_addr_t data, int size)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -415,7 +415,7 @@ err_buff:
 EXPORT_SYMBOL(s3c2410_dma_enqueue);
 
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum s3c2410_dmasrc source,
                          unsigned long devaddr)
 {
@@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel,
 EXPORT_SYMBOL(s3c2410_dma_devconfig);
 
 
-int s3c2410_dma_getposition(unsigned int channel,
+int s3c2410_dma_getposition(enum dma_ch channel,
                            dma_addr_t *src, dma_addr_t *dst)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
  * get control of a dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
 {
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index a79a8ccd25f67898d5a657e785b32d52f2800650..539bd0e3defdc2ab9be00294a997a80fd060f6d6 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
  * get control of a dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
 {
@@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;
@@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
 }
 
 int
-s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -1021,7 +1021,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  * xfersize:     size of unit in bytes (1,2,4)
 */
 
-int s3c2410_dma_config(unsigned int channel,
+int s3c2410_dma_config(enum dma_ch channel,
                       int xferunit)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
  * devaddr:   physical address of the source
 */
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum s3c2410_dmasrc source,
                          unsigned long devaddr)
 {
@@ -1173,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
  * returns the current transfer points for the dma source and destination
 */
 
-int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst)
+int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c
index cb459dd9545957cde60a7f61a2f80bd0820493ff..6143aa1476880a0a37b4328fe6e699caa022daa3 100644
--- a/arch/arm/plat-samsung/dma.c
+++ b/arch/arm/plat-samsung/dma.c
@@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
  * irq?
 */
 
-int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
+int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
 }
 EXPORT_SYMBOL(s3c2410_dma_set_opfn);
 
-int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
+int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
 }
 EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
 
-int s3c2410_dma_setflags(unsigned int channel, unsigned int flags)
+int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 2e8f8c6560d72128c92cdfa16b4f908565865216..8c273b7a6f56593015ccec86869670bedc1b5a3d 100644
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ b/arch/arm/plat-samsung/include/plat/dma.h
@@ -42,6 +42,7 @@ struct s3c2410_dma_client {
 };
 
 struct s3c2410_dma_chan;
+enum dma_ch;
 
 /* s3c2410_dma_cbfn_t
  *
@@ -62,7 +63,7 @@ typedef int  (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
  * request a dma channel exclusively
 */
 
-extern int s3c2410_dma_request(unsigned int channel,
+extern int s3c2410_dma_request(enum dma_ch channel,
                               struct s3c2410_dma_client *, void *dev);
 
 
@@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel,
  * change the state of the dma channel
 */
 
-extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op);
+extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
 
 /* s3c2410_dma_setflags
  *
  * set the channel's flags to a given state
 */
 
-extern int s3c2410_dma_setflags(unsigned int channel,
+extern int s3c2410_dma_setflags(enum dma_ch channel,
                                unsigned int flags);
 
 /* s3c2410_dma_free
@@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel,
  * free the dma channel (will also abort any outstanding operations)
 */
 
-extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
+extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
 
 /* s3c2410_dma_enqueue
  *
@@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
  * drained before the buffer is given to the DMA system.
 */
 
-extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
+extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                               dma_addr_t data, int size);
 
 /* s3c2410_dma_config
@@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
  * configure the dma channel
 */
 
-extern int s3c2410_dma_config(unsigned int channel, int xferunit);
+extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
 
 /* s3c2410_dma_devconfig
  *
  * configure the device we're talking to
 */
 
-extern int s3c2410_dma_devconfig(unsigned int channel,
+extern int s3c2410_dma_devconfig(enum dma_ch channel,
                enum s3c2410_dmasrc source, unsigned long devaddr);
 
 /* s3c2410_dma_getposition
@@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel,
  * get the position that the dma transfer is currently at
 */
 
-extern int s3c2410_dma_getposition(unsigned int channel,
+extern int s3c2410_dma_getposition(enum dma_ch channel,
                                   dma_addr_t *src, dma_addr_t *dest);
 
-extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn);
-extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn);
+extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
+extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
 
 
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 0e46588d847bebe577703fd975c09b4314c5f41c..657405c481d04bed1dc310e245c182192b25e83d 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -54,6 +54,13 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
 
        gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
                                    handle_level_irq);
+
+       if (!gc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+                      __func__, uirq->base_irq);
+               return;
+       }
+
        ct = gc->chip_types;
        ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index a607546ddbd0c25dd9c71f1517b51455324bd11f..f714d060370d6f1647e29e2367591dafa242b4b6 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
 
        s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
                                         S3C64XX_TINT_CSTAT, handle_level_irq);
+
+       if (!s3c_tgc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
+                      __func__, timer_irq);
+               return;
+       }
+
        ct = s3c_tgc->chip_types;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index c018696765d4c86d5a474049f5a13023387f437d..5c74eb797f08f1f4a6cc24df2d1c9567e5a198ed 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/irq.h>
 
 #include <asm/i8259.h>
@@ -215,14 +215,13 @@ spurious_8259A_irq:
        }
 }
 
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
 {
        if (i8259A_auto_eoi >= 0)
                init_8259A(i8259A_auto_eoi);
-       return 0;
 }
 
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
 {
        /* Put the i8259A into a quiescent state that
         * the kernel initialization code can get it
@@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
                outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
                outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */
        }
-       return 0;
 }
 
-static struct sysdev_class i8259_sysdev_class = {
-       .name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
        .resume = i8259A_resume,
        .shutdown = i8259A_shutdown,
 };
 
-static struct sys_device device_i8259A = {
-       .id     = 0,
-       .cls    = &i8259_sysdev_class,
-};
-
 static int __init i8259A_init_sysfs(void)
 {
-       int error = sysdev_class_register(&i8259_sysdev_class);
-       if (!error)
-               error = sysdev_register(&device_i8259A);
-       return error;
+       register_syscore_ops(&i8259_syscore_ops);
+       return 0;
 }
 
 device_initcall(i8259A_init_sysfs);
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index d4d0711de0f9f5031439927d02517d6a5e743509..14848909e0dec49c7b5a7cc56b482bc9a068d26f 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long);
 extern unsigned long arch_local_irq_save(void);
 extern void arch_local_irq_enable(void);
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
 
@@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void)
        return flags;
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
        arch_local_irq_save();
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
        return (flags & PSR_PIL) != 0;
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index aab969c82c2b654391089b180d77d45e62416058..23cd27f6beb47e689842fbfdafc3bbf5826c7977 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -14,7 +14,7 @@
 
 #ifndef __ASSEMBLY__
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
 
@@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void)
        return flags;
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
        __asm__ __volatile__(
                "wrpr   %0, %%pil"
@@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
        );
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
        __asm__ __volatile__(
                "wrpr   %0, %%pil"
@@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void)
        );
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
        __asm__ __volatile__(
                "wrpr   0, %%pil"
@@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void)
        );
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
 {
        return (flags > 0);
 }
 
-static inline int arch_irqs_disabled(void)
+static inline notrace int arch_irqs_disabled(void)
 {
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
        unsigned long flags, tmp;
 
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index c7ad3fe2b252b8b16c1bca8e2c062a77da1e3f6a..b928b31424b1fd6c35c40572eb10ee5637299632 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -205,6 +205,7 @@ do {        current_thread_info()->syscall_noerror = 1; \
 } while (0)
 #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
 #define instruction_pointer(regs) ((regs)->tpc)
+#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
 #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
 #define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 9fe08a1ea6c6ea226f9cb4a91bf1f183067d98da..f445e98463e6d332c0191f13092738dc8336677b 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
-       sll     %o3, 28, %o2            ! shift for simpler checks below
+       srl     %o3, 28, %o2            ! shift for simpler checks below
 maybe_smp4m_msg_check_single:
        andcc   %o2, 0x1, %g0
        beq,a   maybe_smp4m_msg_check_mask
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index c0e01297e64eb84a03b8582e135c616b7c129ba0..e485a680499824319b5e9bd6fb750806d99a9429 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
  * Leon2 and Leon3 differ in their way of telling cache information
  *
  */
-int leon_flush_needed(void)
+int __init leon_flush_needed(void)
 {
        int flush_needed = -1;
        unsigned int ssize, sets;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da349723d4115cef7d75aac4680ba2284deaf0d0..37357a599dcac02e4407467e844bbac0d9e8d224 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
        def_bool y
        prompt "Old style AMD Opteron NUMA detection"
-       depends on NUMA && PCI
+       depends on X86_64 && NUMA && PCI
        ---help---
          Enable AMD NUMA node topology detection.  You should say Y here if
          you have a multi processor AMD system. This uses an old method to
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4f0d46fefa7f7166e4d82b4dfda94528662034aa..9242436e9937e5a4ef91c6baa04eaf2e90243125 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -419,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
                },
        },
+       {       /* Handle problems with rebooting on the Latitude E6320. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Latitude E6320",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+               },
+       },
+       {       /* Handle problems with rebooting on the Latitude E5420. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Latitude E5420",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+               },
+       },
+       {       /* Handle problems with rebooting on the Latitude E6420. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Latitude E6420",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+               },
+       },
        { }
 };
 
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index abda3786a5d70c4b22738b1245303dbabe2722d6..181bc2f7bb7411a4b300c23c2450ad79ae129a4b 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -139,13 +139,23 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
        struct platform_device *ghes_dev;
        struct ghes_arr *ghes_arr = data;
-       int rc;
+       int rc, i;
 
        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;
 
        if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
                return 0;
+       for (i = 0; i < ghes_arr->count; i++) {
+               struct acpi_hest_header *hdr;
+               ghes_dev = ghes_arr->ghes_devs[i];
+               hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
+               if (hdr->source_id == hest_hdr->source_id) {
+                       pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+                                  hdr->source_id);
+                       return -EIO;
+               }
+       }
        ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
        if (!ghes_dev)
                return -ENOMEM;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 52ca9649d76925abc1718e9c3bed4391cf189006..372f9b70f7f4dc98e2985532fb19707861c920a8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1332,23 +1332,6 @@ int acpi_resources_are_enforced(void)
 }
 EXPORT_SYMBOL(acpi_resources_are_enforced);
 
-/*
- * Create and initialize a spinlock.
- */
-acpi_status
-acpi_os_create_lock(acpi_spinlock *out_handle)
-{
-       spinlock_t *lock;
-
-       lock = ACPI_ALLOCATE(sizeof(spinlock_t));
-       if (!lock)
-               return AE_NO_MEMORY;
-       spin_lock_init(lock);
-       *out_handle = lock;
-
-       return AE_OK;
-}
-
 /*
  * Deallocate the memory for a spinlock.
  */
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 999803ce10dc5cae9c3571c155de3a5d1276a5c7..5da67f165afaf8df358d1c884083429948cf781e 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
 #define G4x_GMCH_SIZE_MASK     (0xf << 8)
 #define G4x_GMCH_SIZE_1M       (0x1 << 8)
 #define G4x_GMCH_SIZE_2M       (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M    (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M  (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M    (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN    (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M    (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M  ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M    (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL          0x2170 /* 915+ */
 
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c
index 309644cf4d9b178cd8d91a2d57e348497f7c76c0..2bcfb0be09ff38e1db182c77a78c506fba8f6149 100644
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/wm831x-gpio.c
@@ -180,6 +180,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                        break;
                case WM831X_GPIO_PULL_UP:
                        pull = "pullup";
+                       break;
                default:
                        pull = "INVALID PULL";
                        break;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f245c588ae954fe8fe07d4a235dd46b219038a16..ce7914c4c044662153545e7c2d87e277432fbc34 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -262,6 +262,7 @@ enum intel_pch {
 };
 
 #define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
 
 struct intel_fbdev;
 
@@ -1194,7 +1195,9 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+                                   uint32_t size,
+                                   int tiling_mode);
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c0d1247f4535e1e9ee83076151b95552eb10060..a087e1bf0c2f4359af4374aab8ae7c50ae5040a2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1374,25 +1374,24 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 }
 
 static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
-       struct drm_device *dev = obj->base.dev;
-       uint32_t size;
+       uint32_t gtt_size;
 
        if (INTEL_INFO(dev)->gen >= 4 ||
-           obj->tiling_mode == I915_TILING_NONE)
-               return obj->base.size;
+           tiling_mode == I915_TILING_NONE)
+               return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
        if (INTEL_INFO(dev)->gen == 3)
-               size = 1024*1024;
+               gtt_size = 1024*1024;
        else
-               size = 512*1024;
+               gtt_size = 512*1024;
 
-       while (size < obj->base.size)
-               size <<= 1;
+       while (gtt_size < size)
+               gtt_size <<= 1;
 
-       return size;
+       return gtt_size;
 }
 
 /**
@@ -1403,59 +1402,52 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
  * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+                          uint32_t size,
+                          int tiling_mode)
 {
-       struct drm_device *dev = obj->base.dev;
-
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (INTEL_INFO(dev)->gen >= 4 ||
-           obj->tiling_mode == I915_TILING_NONE)
+           tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       return i915_gem_get_gtt_size(obj);
+       return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
 /**
  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
  *                                      unfenced object
- * @obj: object to check
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
  *
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+                                   uint32_t size,
+                                   int tiling_mode)
 {
-       struct drm_device *dev = obj->base.dev;
-       int tile_height;
-
        /*
         * Minimum alignment is 4k (GTT page size) for sane hw.
         */
        if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-           obj->tiling_mode == I915_TILING_NONE)
+           tiling_mode == I915_TILING_NONE)
                return 4096;
 
-       /*
-        * Older chips need unfenced tiled buffers to be aligned to the left
-        * edge of an even tile row (where tile rows are counted as if the bo is
-        * placed in a fenced gtt region).
+       /* Previous hardware however needs to be aligned to a power-of-two
+        * tile height. The simplest method for determining this is to reuse
+        * the power-of-tile object size.
         */
-       if (IS_GEN2(dev))
-               tile_height = 16;
-       else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-               tile_height = 32;
-       else
-               tile_height = 8;
-
-       return tile_height * obj->stride * 2;
+       return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
 int
@@ -2744,9 +2736,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                return -EINVAL;
        }
 
-       fence_size = i915_gem_get_gtt_size(obj);
-       fence_alignment = i915_gem_get_gtt_alignment(obj);
-       unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+       fence_size = i915_gem_get_gtt_size(dev,
+                                          obj->base.size,
+                                          obj->tiling_mode);
+       fence_alignment = i915_gem_get_gtt_alignment(dev,
+                                                    obj->base.size,
+                                                    obj->tiling_mode);
+       unfenced_alignment =
+               i915_gem_get_unfenced_gtt_alignment(dev,
+                                                   obj->base.size,
+                                                   obj->tiling_mode);
 
        if (alignment == 0)
                alignment = map_and_fenceable ? fence_alignment :
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 82d70fd9e933b8aff68afeffca08dca89e937fe2..99c4faa59d8f3d2fdf372b4a1c53705710ea7131 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -348,7 +348,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                /* Rebind if we need a change of alignment */
                if (!obj->map_and_fenceable) {
                        u32 unfenced_alignment =
-                               i915_gem_get_unfenced_gtt_alignment(obj);
+                               i915_gem_get_unfenced_gtt_alignment(dev,
+                                                                   obj->base.size,
+                                                                   args->tiling_mode);
                        if (obj->gtt_offset & (unfenced_alignment - 1))
                                ret = i915_gem_object_unbind(obj);
                }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 21b6f93fe9196d277b2b2c7b926a8faf729706b8..0f1c799afea1be623f38cd019ca9fbfe42d37516 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4305,7 +4305,8 @@ static void intel_update_watermarks(struct drm_device *dev)
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
-       return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+       return dev_priv->lvds_use_ssc && i915_panel_use_ssc
+               && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
@@ -7810,6 +7811,15 @@ static void quirk_pipea_force (struct drm_device *dev)
        DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
 }
 
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -7838,6 +7848,9 @@ struct intel_quirk intel_quirks[] = {
        /* 855 & before need to leave pipe A & dpll A up */
        { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
        { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+       /* Lenovo U160 cannot use SSC on LVDS */
+       { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 660f96401a05a7ac8dd4766802684b5a42d03f38..15bd0477a3e8714da218abdc07bec96038f5244b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2000,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                        gb_backend_map = 0x66442200;
                        break;
                case CHIP_JUNIPER:
-                       gb_backend_map = 0x00006420;
+                       gb_backend_map = 0x00002200;
                        break;
                default:
                        gb_backend_map =
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 57f3bc17b87e09a9dd0d1fd99fe66800e43ee9fb..2eb251858e7283d37b5311637815abae8dd50e0e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
 
 }
 
-/* emits 36 */
+/* emits 39 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
                radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
                radeon_ring_write(rdev, 0);
 
+               /* setup LDS */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(rdev, 0x10001000);
+
                /* SQ config */
                radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
                radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default  + shaders */
-       ring_size += 52; /* shaders + def state */
+       ring_size += 55; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 3fc5fa1aefd0f57bcea68ef07c65f7f37f456663..229a20f10e2b0c548b02cd5df527b9b72d6fe31a 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 
        seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
        viph_control = RREG32(RADEON_VIPH_CONTROL);
-       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       bus_cntl = RREG32(RV370_BUS_CNTL);
        d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
        d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
        vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
        WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
        /* enable the rom */
-       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
 
        /* Disable VGA mode */
        WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
        /* restore regs */
        WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
        WREG32(RADEON_VIPH_CONTROL, viph_control);
-       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(RV370_BUS_CNTL, bus_cntl);
        WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
        WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
        WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 
        seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
        viph_control = RREG32(RADEON_VIPH_CONTROL);
-       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       if (rdev->flags & RADEON_IS_PCIE)
+               bus_cntl = RREG32(RV370_BUS_CNTL);
+       else
+               bus_cntl = RREG32(RADEON_BUS_CNTL);
        crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
        crtc2_gen_cntl = 0;
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
        /* enable the rom */
-       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       if (rdev->flags & RADEON_IS_PCIE)
+               WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+       else
+               WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
 
        /* Turn off mem requests and CRTC for both controllers */
        WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        /* restore regs */
        WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
        WREG32(RADEON_VIPH_CONTROL, viph_control);
-       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       if (rdev->flags & RADEON_IS_PCIE)
+               WREG32(RV370_BUS_CNTL, bus_cntl);
+       else
+               WREG32(RADEON_BUS_CNTL, bus_cntl);
        WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
        if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
                WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cbfca3a24fdf9caac828d4cca8ce8445963af4c4..9792d4ffdc86250e102457c5bd36765098dde7e3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
+       /* bail if the connector does not have hpd pin, e.g.,
+        * VGA, TV, etc.
+        */
+       if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+               return;
+
        radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
        /* powering up/down the eDP panel generates hpd events which
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index ec93a75369e671c9b9d0b98407ad5c5e901a4eb2..bc44a3d35ec6f49de04a9570e8fa7b51f600523c 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
 #       define RADEON_BUS_READ_BURST         (1 << 30)
 #define RADEON_BUS_CNTL1                    0x0034
 #       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+#define RV370_BUS_CNTL                      0x004c
+#       define RV370_BUS_BIOS_DIS_ROM        (1 << 2)
 /* rv370/rv380, rv410, r423/r430/r480, r5xx */
 #define RADEON_MSI_REARM_EN                0x0160
 #      define RV370_MSI_REARM_EN            (1 << 0)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6e3b11e5abbe5a83842e4e9b1b49d6531e460d92..1f5850e473cc35716f5c70d9a4640209caea2e41 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
        return radeon_gart_table_vram_alloc(rdev);
 }
 
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
 {
        u32 tmp;
        int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
                return r;
        radeon_gart_restore(rdev);
        /* Enable bus master */
-       tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
-       WREG32(R_00004C_BUS_CNTL, tmp);
+       tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+       WREG32(RADEON_BUS_CNTL, tmp);
        /* FIXME: setup default page */
        WREG32_MC(R_000100_MC_PT0_CNTL,
                  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/adm1275.c
index b9b7caf4a1d2c134d3c0440740ded963da856a5c..8bc1bd663721fb3b95cf792dce4779cf18550fa7 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/adm1275.c
@@ -53,23 +53,23 @@ static int adm1275_probe(struct i2c_client *client,
        info->direct[PSC_VOLTAGE_IN] = true;
        info->direct[PSC_VOLTAGE_OUT] = true;
        info->direct[PSC_CURRENT_OUT] = true;
-       info->m[PSC_CURRENT_OUT] = 800;
+       info->m[PSC_CURRENT_OUT] = 807;
        info->b[PSC_CURRENT_OUT] = 20475;
        info->R[PSC_CURRENT_OUT] = -1;
        info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 
        if (config & ADM1275_VRANGE) {
-               info->m[PSC_VOLTAGE_IN] = 19045;
+               info->m[PSC_VOLTAGE_IN] = 19199;
                info->b[PSC_VOLTAGE_IN] = 0;
                info->R[PSC_VOLTAGE_IN] = -2;
-               info->m[PSC_VOLTAGE_OUT] = 19045;
+               info->m[PSC_VOLTAGE_OUT] = 19199;
                info->b[PSC_VOLTAGE_OUT] = 0;
                info->R[PSC_VOLTAGE_OUT] = -2;
        } else {
-               info->m[PSC_VOLTAGE_IN] = 6666;
+               info->m[PSC_VOLTAGE_IN] = 6720;
                info->b[PSC_VOLTAGE_IN] = 0;
                info->R[PSC_VOLTAGE_IN] = -1;
-               info->m[PSC_VOLTAGE_OUT] = 6666;
+               info->m[PSC_VOLTAGE_OUT] = 6720;
                info->b[PSC_VOLTAGE_OUT] = 0;
                info->R[PSC_VOLTAGE_OUT] = -1;
        }
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index dcb78a7a804754956035f2c57e2e1f8673ac45df..00e98517f94c6d5bbb26e384cb08bb687b00cc16 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -674,6 +674,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
        else
                err = -EIO;
 
+       ACPI_FREE(ret);
        return err;
 }
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index bb6405b92007b7b9a9ba2c95038e49f01211c7a0..5f52477504305e9f679e8d74e6cd4a402808c821 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -1538,7 +1538,7 @@ static struct attribute *it87_attributes_label[] = {
 };
 
 static const struct attribute_group it87_group_label = {
-       .attrs = it87_attributes_vid,
+       .attrs = it87_attributes_label,
 };
 
 /* SuperIO detection - will change isa_address if a chip is found */
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 12a54aa297760b1c6913a3fe772ab3dc7cb0c3ee..14335bbc9bdce30512422770f8772bb86827bf5e 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -40,6 +40,8 @@ struct max1111_data {
        struct spi_transfer     xfer[2];
        uint8_t *tx_buf;
        uint8_t *rx_buf;
+       struct mutex            drvdata_lock;
+       /* protect msg, xfer and buffers from multiple access */
 };
 
 static int max1111_read(struct device *dev, int channel)
@@ -48,6 +50,9 @@ static int max1111_read(struct device *dev, int channel)
        uint8_t v1, v2;
        int err;
 
+       /* writing to drvdata struct is not thread safe, wait on mutex */
+       mutex_lock(&data->drvdata_lock);
+
        data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) |
                MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
                MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
@@ -55,12 +60,15 @@ static int max1111_read(struct device *dev, int channel)
        err = spi_sync(data->spi, &data->msg);
        if (err < 0) {
                dev_err(dev, "spi_sync failed with %d\n", err);
+               mutex_unlock(&data->drvdata_lock);
                return err;
        }
 
        v1 = data->rx_buf[0];
        v2 = data->rx_buf[1];
 
+       mutex_unlock(&data->drvdata_lock);
+
        if ((v1 & 0xc0) || (v2 & 0x3f))
                return -EINVAL;
 
@@ -176,6 +184,8 @@ static int __devinit max1111_probe(struct spi_device *spi)
        if (err)
                goto err_free_data;
 
+       mutex_init(&data->drvdata_lock);
+
        data->spi = spi;
        spi_set_drvdata(spi, data);
 
@@ -213,6 +223,7 @@ static int __devexit max1111_remove(struct spi_device *spi)
 
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+       mutex_destroy(&data->drvdata_lock);
        kfree(data->rx_buf);
        kfree(data->tx_buf);
        kfree(data);
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 744672c1f26d6be2b499f51e59d1dfead082d94d..8e31a8e2c746e8848c7268ab5c8091b966b648b4 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -362,8 +362,8 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
  * Convert linear sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_linear(struct pmbus_data *data,
-                                struct pmbus_sensor *sensor)
+static long pmbus_reg2data_linear(struct pmbus_data *data,
+                                 struct pmbus_sensor *sensor)
 {
        s16 exponent;
        s32 mantissa;
@@ -397,15 +397,15 @@ static int pmbus_reg2data_linear(struct pmbus_data *data,
        else
                val >>= -exponent;
 
-       return (int)val;
+       return val;
 }
 
 /*
  * Convert direct sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_direct(struct pmbus_data *data,
-                                struct pmbus_sensor *sensor)
+static long pmbus_reg2data_direct(struct pmbus_data *data,
+                                 struct pmbus_sensor *sensor)
 {
        long val = (s16) sensor->data;
        long m, b, R;
@@ -440,12 +440,12 @@ static int pmbus_reg2data_direct(struct pmbus_data *data,
                R++;
        }
 
-       return (int)((val - b) / m);
+       return (val - b) / m;
 }
 
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
 {
-       int val;
+       long val;
 
        if (data->info->direct[sensor->class])
                val = pmbus_reg2data_direct(data, sensor);
@@ -619,7 +619,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
        if (!s1 && !s2)
                *val = !!regval;
        else {
-               int v1, v2;
+               long v1, v2;
                struct pmbus_sensor *sensor1, *sensor2;
 
                sensor1 = &data->sensors[s1];
@@ -661,7 +661,7 @@ static ssize_t pmbus_show_sensor(struct device *dev,
        if (sensor->data < 0)
                return sensor->data;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+       return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
 }
 
 static ssize_t pmbus_set_sensor(struct device *dev,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 98278041d75f5b790affdc7a024180ca91c0854a..5b6b451d46940db41b8d2b3fd4473e7a1429383a 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -1988,6 +1988,14 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
        if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
                if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
                        goto err0;
+
+               /* If we took control of the bus, we need to force
+                  reinitialization.  This is because many ts_bus_ctrl()
+                  functions strobe the RESET pin on the demod, and if the
+                  frontend thread already exists then the dvb_init() routine
+                  won't get called (which is what usually does initial
+                  register configuration). */
+               fepriv->reinitialise = 1;
        }
 
        if ((ret = dvb_generic_open (inode, file)) < 0)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index e4c97fd6f05a329db01408effae324c4f2ef3d42..52798a111e16cb5a55df761aa228e60ec3f9343f 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -168,7 +168,7 @@ config RADIO_MAXIRADIO
 
 config RADIO_MIROPCM20
        tristate "miroSOUND PCM20 radio"
-       depends on ISA && VIDEO_V4L2 && SND
+       depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
        select SND_ISA
        select SND_MIRO
        ---help---
@@ -201,7 +201,7 @@ config RADIO_SF16FMI
 
 config RADIO_SF16FMR2
        tristate "SF16FMR2 Radio"
-       depends on ISA && VIDEO_V4L2
+       depends on ISA && VIDEO_V4L2 && SND
        ---help---
          Choose Y here if you have one of these FM radio cards.
 
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index deca2e06ff2203bba0caae2e5737499fd0f9cdc2..c9f4a8e65dc45daccc97c70bdaa99c8510fddae1 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1033,7 +1033,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
                char ps_name[MAX_RDS_PS_NAME + 1];
 
                len = control->size - 1;
-               if (len > MAX_RDS_PS_NAME) {
+               if (len < 0 || len > MAX_RDS_PS_NAME) {
                        rval = -ERANGE;
                        goto exit;
                }
@@ -1057,7 +1057,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
                char radio_text[MAX_RDS_RADIO_TEXT + 1];
 
                len = control->size - 1;
-               if (len > MAX_RDS_RADIO_TEXT) {
+               if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
                        rval = -ERANGE;
                        goto exit;
                }
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 06dfe0957b5e1d26ae2d7f61c1c7154e8a47f24e..ec972dc25790ea8b18e2641a0e045e1f5858bc3f 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -558,9 +558,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
                                 inout, data1);
                        break;
                case MCE_CMD_S_TIMEOUT:
-                       /* value is in units of 50us, so x*50/100 or x/2 ms */
+                       /* value is in units of 50us, so x*50/1000 ms */
                        dev_info(dev, "%s receive timeout of %d ms\n",
-                                inout, ((data1 << 8) | data2) / 2);
+                                inout,
+                                ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
                        break;
                case MCE_CMD_G_TIMEOUT:
                        dev_info(dev, "Get receive timeout\n");
@@ -847,7 +848,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
        switch (ir->buf_in[index]) {
        /* 2-byte return value commands */
        case MCE_CMD_S_TIMEOUT:
-               ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
+               ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
                break;
 
        /* 1-byte return value commands */
@@ -1078,7 +1079,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
        rc->priv = ir;
        rc->driver_type = RC_DRIVER_IR_RAW;
        rc->allowed_protos = RC_TYPE_ALL;
-       rc->timeout = US_TO_NS(1000);
+       rc->timeout = MS_TO_NS(100);
        if (!ir->flags.no_tx) {
                rc->s_tx_mask = mceusb_set_tx_mask;
                rc->s_tx_carrier = mceusb_set_tx_carrier;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 565f24c20d77ddad0f4a02f6e0aa2867d377503f..ce595f9ab4c7a41c16ff0d69ecc33faf9fc3c4f4 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -1110,7 +1110,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        rdev->dev.parent = &pdev->dev;
        rdev->driver_name = NVT_DRIVER_NAME;
        rdev->map_name = RC_MAP_RC6_MCE;
-       rdev->timeout = US_TO_NS(1000);
+       rdev->timeout = MS_TO_NS(100);
        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
        rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
 #if 0
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 64d9b2136ff6b536fa1c22e11eb1c9bc80a9185f..419777a832ee2fd929bf0e087add45ad7d09e973 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -2060,12 +2060,8 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
                goto fail_irq;
        }
 
-       if (!pci_enable_msi(pci_dev))
-               err = request_irq(pci_dev->irq, cx23885_irq,
-                                 IRQF_DISABLED, dev->name, dev);
-       else
-               err = request_irq(pci_dev->irq, cx23885_irq,
-                                 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+       err = request_irq(pci_dev->irq, cx23885_irq,
+                         IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
        if (err < 0) {
                printk(KERN_ERR "%s: can't get IRQ %d\n",
                       dev->name, pci_dev->irq);
@@ -2114,7 +2110,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
 
        /* unregister stuff */
        free_irq(pci_dev->irq, dev);
-       pci_disable_msi(pci_dev);
 
        cx23885_dev_unregister(dev);
        v4l2_device_unregister(v4l2_dev);
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index cfa9f7efe93dc4247f740a5192527541bc44a0a0..a03945ab9f08f14218501d28a5eee4ba4cd629f8 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -714,10 +714,19 @@ static int tuner_remove(struct i2c_client *client)
  * returns 0.
  * This function is needed for boards that have a separate tuner for
  * radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ *       select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ *       be used to represent a digital TV too.
  */
 static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
 {
-       if ((1 << mode & t->mode_mask) == 0)
+       int t_mode;
+       if (mode == V4L2_TUNER_RADIO)
+               t_mode = T_RADIO;
+       else
+               t_mode = T_ANALOG_TV;
+
+       if ((t_mode & t->mode_mask) == 0)
                return -EINVAL;
 
        return 0;
@@ -984,7 +993,7 @@ static void tuner_status(struct dvb_frontend *fe)
        case V4L2_TUNER_RADIO:
                p = "radio";
                break;
-       case V4L2_TUNER_DIGITAL_TV:
+       case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
                p = "digital TV";
                break;
        case V4L2_TUNER_ANALOG_TV:
@@ -1135,9 +1144,8 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
                return 0;
        if (vt->type == t->mode && analog_ops->get_afc)
                vt->afc = analog_ops->get_afc(&t->fe);
-       if (vt->type == V4L2_TUNER_ANALOG_TV)
+       if (t->mode != V4L2_TUNER_RADIO) {
                vt->capability |= V4L2_TUNER_CAP_NORM;
-       if (vt->type != V4L2_TUNER_RADIO) {
                vt->rangelow = tv_range[0] * 16;
                vt->rangehigh = tv_range[1] * 16;
                return 0;
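
The reworked check_mode() collapses the three v4l2 tuner types onto the driver's two internal mode bits before testing mode_mask. A sketch of the mapping (illustrative restatement only; T_RADIO and T_ANALOG_TV come from tuner.h):

    static int t_mode_of(enum v4l2_tuner_type mode)
    {
            /* radio keeps its own bit; analog and digital TV share
             * T_ANALOG_TV, since mt20xx tunes digital TV through
             * set_tv_freq() as the comment above explains */
            return (mode == V4L2_TUNER_RADIO) ? T_RADIO : T_ANALOG_TV;
    }
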
index 2a7e43bc796dfd1e798ad60a1ae6d0e7b98278a2..aa7d1d79b8c554c143c004db5e3ef9565ca653dd 100644 (file)
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                return 0;
 
        /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+       card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
        if (card->csd.structure == 3) {
-               int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
-               if (ext_csd_struct > 2) {
+               if (card->ext_csd.raw_ext_csd_structure > 2) {
                        printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
                                "version %d\n", mmc_hostname(card->host),
-                                       ext_csd_struct);
+                                       card->ext_csd.raw_ext_csd_structure);
                        err = -EINVAL;
                        goto out;
                }
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                goto out;
        }
 
+       card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+       card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+       card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+       card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
        if (card->ext_csd.rev >= 2) {
                card->ext_csd.sectors =
                        ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
                        mmc_card_set_blockaddr(card);
        }
-
+       card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
        switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
        case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
             EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        mmc_hostname(card->host));
        }
 
+       card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+       card->ext_csd.raw_erase_timeout_mult =
+               ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+       card->ext_csd.raw_hc_erase_grp_size =
+               ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
        if (card->ext_csd.rev >= 3) {
                u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
        }
 
+       card->ext_csd.raw_hc_erase_gap_size =
+               ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+       card->ext_csd.raw_sec_trim_mult =
+               ext_csd[EXT_CSD_SEC_TRIM_MULT];
+       card->ext_csd.raw_sec_erase_mult =
+               ext_csd[EXT_CSD_SEC_ERASE_MULT];
+       card->ext_csd.raw_sec_feature_support =
+               ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+       card->ext_csd.raw_trim_mult =
+               ext_csd[EXT_CSD_TRIM_MULT];
        if (card->ext_csd.rev >= 4) {
                /*
                 * Enhanced area feature support -- check whether the eMMC
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                 * area offset and size to user by adding sysfs interface.
                 */
                if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
-                               (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+                   (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
                        u8 hc_erase_grp_sz =
                                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
                        u8 hc_wp_grp_sz =
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
 }
 
 
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
-                       unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
 {
        u8 *bw_ext_csd;
        int err;
 
+       if (bus_width == MMC_BUS_WIDTH_1)
+               return 0;
+
        err = mmc_get_ext_csd(card, &bw_ext_csd);
-       if (err)
-               return err;
 
-       if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+       if (err || bw_ext_csd == NULL) {
                if (bus_width != MMC_BUS_WIDTH_1)
                        err = -EINVAL;
                goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
                goto out;
 
        /* only compare read only fields */
-       err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+       err = (!(card->ext_csd.raw_partition_support ==
                        bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
-               (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+               (card->ext_csd.raw_erased_mem_count ==
                        bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
-               (ext_csd[EXT_CSD_REV] ==
+               (card->ext_csd.rev ==
                        bw_ext_csd[EXT_CSD_REV]) &&
-               (ext_csd[EXT_CSD_STRUCTURE] ==
+               (card->ext_csd.raw_ext_csd_structure ==
                        bw_ext_csd[EXT_CSD_STRUCTURE]) &&
-               (ext_csd[EXT_CSD_CARD_TYPE] ==
+               (card->ext_csd.raw_card_type ==
                        bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
-               (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+               (card->ext_csd.raw_s_a_timeout ==
                        bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
-               (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+               (card->ext_csd.raw_hc_erase_gap_size ==
                        bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
-               (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+               (card->ext_csd.raw_erase_timeout_mult ==
                        bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
-               (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+               (card->ext_csd.raw_hc_erase_grp_size ==
                        bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
-               (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+               (card->ext_csd.raw_sec_trim_mult ==
                        bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
-               (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+               (card->ext_csd.raw_sec_erase_mult ==
                        bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
-               (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+               (card->ext_csd.raw_sec_feature_support ==
                        bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
-               (ext_csd[EXT_CSD_TRIM_MULT] ==
+               (card->ext_csd.raw_trim_mult ==
                        bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
-               memcmp(&ext_csd[EXT_CSD_SEC_CNT],
-                      &bw_ext_csd[EXT_CSD_SEC_CNT],
-                      4) != 0);
+               (card->ext_csd.raw_sectors[0] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+               (card->ext_csd.raw_sectors[1] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+               (card->ext_csd.raw_sectors[2] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+               (card->ext_csd.raw_sectors[3] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
        if (err)
                err = -EINVAL;
 
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                 */
                                if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
                                        err = mmc_compare_ext_csds(card,
-                                               ext_csd,
                                                bus_width);
                                else
                                        err = mmc_bus_test(card, bus_width);
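
The shape of this refactor: mmc_read_ext_csd() now snapshots every read-only EXT_CSD byte it will later need into card->ext_csd.raw_*, so mmc_compare_ext_csds() can re-read the register after a bus-width switch and compare against the cache instead of a caller-supplied buffer. A trimmed sketch of the idea (two fields instead of the driver's full list; struct and helper names are hypothetical):

    #include <string.h>

    enum {                          /* offsets as in the driver's mmc.h */
            EXT_CSD_CARD_TYPE = 196,
            EXT_CSD_SEC_CNT   = 212,
    };

    struct ext_csd_cache {          /* stands in for card->ext_csd.raw_* */
            unsigned char raw_card_type;
            unsigned char raw_sectors[4];
    };

    static void cache_ext_csd(struct ext_csd_cache *c, const unsigned char *e)
    {
            c->raw_card_type = e[EXT_CSD_CARD_TYPE];
            memcpy(c->raw_sectors, &e[EXT_CSD_SEC_CNT], 4);
    }

    /* nonzero when the register re-read after the switch differs */
    static int ext_csd_differs(const struct ext_csd_cache *c,
                               const unsigned char *bw_ext_csd)
    {
            return c->raw_card_type != bw_ext_csd[EXT_CSD_CARD_TYPE] ||
                   memcmp(c->raw_sectors, &bw_ext_csd[EXT_CSD_SEC_CNT], 4);
    }
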
index eafe44a528ac015c8edddec6b0650a9eb526449c..63c22b0bb5ad0fa1d192967371fd6071f824ce2a 100644 (file)
@@ -1428,9 +1428,9 @@ out:
        return features;
 }
 
-#define BOND_VLAN_FEATURES     (NETIF_F_ALL_TX_OFFLOADS | \
-                                NETIF_F_SOFT_FEATURES | \
-                                NETIF_F_LRO)
+#define BOND_VLAN_FEATURES     (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                                NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 static void bond_compute_features(struct bonding *bond)
 {
index 2dfcc8047847b12ade17da7c324a717133398680..dfa55f94ba7fcbe612acfb460bff7125b834f96b 100644 (file)
@@ -2289,6 +2289,23 @@ static int gfar_set_mac_address(struct net_device *dev)
        return 0;
 }
 
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs;
+       u32 tempval;
+
+       regs = priv->gfargrp[0].regs;
+
+       tempval = gfar_read(&regs->rctrl);
+       /* If parsing is no longer required, then disable the parser */
+       if (tempval & RCTRL_REQ_PARSER)
+               tempval |= RCTRL_PRSDEP_INIT;
+       else
+               tempval &= ~RCTRL_PRSDEP_INIT;
+       gfar_write(&regs->rctrl, tempval);
+}
+
 
 /* Enables and disables VLAN insertion/extraction */
 static void gfar_vlan_rx_register(struct net_device *dev,
@@ -2325,12 +2342,9 @@ static void gfar_vlan_rx_register(struct net_device *dev,
                /* Disable VLAN tag extraction */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~RCTRL_VLEX;
-               /* If parse is no longer required, then disable parser */
-               if (tempval & RCTRL_REQ_PARSER)
-                       tempval |= RCTRL_PRSDEP_INIT;
-               else
-                       tempval &= ~RCTRL_PRSDEP_INIT;
                gfar_write(&regs->rctrl, tempval);
+
+               gfar_check_rx_parser_mode(priv);
        }
 
        gfar_change_mtu(dev, dev->mtu);
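
Factoring the parser check out of gfar_vlan_rx_register() lets any path that clears an RCTRL_REQ_PARSER bit re-evaluate the parser state. A sketch of the intended call pattern (RCTRL_IPCSEN chosen arbitrarily as the example bit; the function is hypothetical):

    static void example_disable_rx_csum(struct gfar_private *priv)
    {
            struct gfar __iomem *regs = priv->gfargrp[0].regs;
            u32 tempval = gfar_read(&regs->rctrl);

            tempval &= ~RCTRL_IPCSEN;
            gfar_write(&regs->rctrl, tempval);
            gfar_check_rx_parser_mode(priv);  /* new helper from this patch */
    }
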
index ba36dc7a34356c0fac622cbd2de682201049d5eb..440e69d8beff6a121a53723f07439c24bb043d81 100644 (file)
@@ -274,7 +274,7 @@ extern const char gfar_driver_version[];
 #define RCTRL_PROM             0x00000008
 #define RCTRL_EMEN             0x00000002
 #define RCTRL_REQ_PARSER       (RCTRL_VLEX | RCTRL_IPCSEN | \
-                                RCTRL_TUCSEN)
+                                RCTRL_TUCSEN | RCTRL_FILREN)
 #define RCTRL_CHECKSUMMING     (RCTRL_IPCSEN | RCTRL_TUCSEN | \
                                RCTRL_PRSDEP_INIT)
 #define RCTRL_EXTHASH          (RCTRL_GHTX)
@@ -1156,6 +1156,7 @@ extern void gfar_configure_coalescing(struct gfar_private *priv,
                unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 8f8b65af9ed5f9fcbb977d69294096dc51d66d12..60f46bc2bf64076bca146c272b9dcf6059dab609 100644 (file)
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
 module_param(mtu, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
                np->rx_ring[i].cmd_status = 0;
                np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (np->rx_skbuff[i]) {
-                       pci_unmap_single(np->pci_dev,
-                               np->rx_dma[i], buflen,
+                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
+                               buflen + NATSEMI_PADDING,
                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skbuff[i]);
                }
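
Two separate natsemi fixes here: pci_unmap_single() must use the same buflen + NATSEMI_PADDING size the buffer was mapped with, and module_param()'s third argument is a sysfs mode, not a default value. A sketch of the corrected module_param pattern (the default belongs on the variable itself):

    static int dspcfg_workaround = 1;       /* default: workaround on */

    /* third argument is an octal sysfs mode:
     *   0    -> no sysfs entry (chosen here)
     *   0444 -> world-readable
     *   0644 -> also root-writable
     * the old "1" asked for a meaningless mode-0001 file */
    module_param(dspcfg_workaround, int, 0);
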
index 718879b35b7d2ac2ea5415dcd70b63e64da94255..bc9a4bb31980f1758a38e38daebdc5a7ab8f2a12 100644 (file)
@@ -348,8 +348,9 @@ static int pppoe_device_event(struct notifier_block *this,
 
        /* Only look at sockets that are using this specific device. */
        switch (event) {
+       case NETDEV_CHANGEADDR:
        case NETDEV_CHANGEMTU:
-               /* A change in mtu is a bad thing, requiring
+               /* A change in mtu or address is a bad thing, requiring
                 * LCP re-negotiation.
                 */
 
index 200a363c3bf59ac47d5da56e762486af6572b252..0ffec46084416958db7d7b9b756c859aa416e7d0 100644 (file)
@@ -677,9 +677,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
                if (status & RX_FIFO_FULL)
                        dev->stats.rx_fifo_errors++;
 
-               /* Mask off RX interrupt */
-               misr &= ~RX_INTS;
-               napi_schedule(&lp->napi);
+               if (likely(napi_schedule_prep(&lp->napi))) {
+                       /* Mask off RX interrupt */
+                       misr &= ~RX_INTS;
+                       __napi_schedule(&lp->napi);
+               }
        }
 
        /* TX interrupt request */
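
The r6040 change adopts the standard interrupt-side NAPI idiom: napi_schedule_prep() atomically claims the poll, and only the winner masks RX interrupts and queues it with __napi_schedule(), so the mask can no longer be applied when a poll is already pending. Generic sketch (example_priv and disable_rx_ints() are hypothetical stand-ins for the driver's misr handling):

    struct example_priv { struct napi_struct napi; };

    static irqreturn_t example_rx_irq(struct example_priv *lp)
    {
            if (likely(napi_schedule_prep(&lp->napi))) {
                    /* we own the poll now: safe to mask RX irqs and queue */
                    disable_rx_ints(lp);
                    __napi_schedule(&lp->napi);
            }
            /* if prep failed, a poll is already queued and will see the work */
            return IRQ_HANDLED;
    }
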
index 8ec1a9a0bb9ae007c69865b2599f07d3f23c99c8..2f110fb30daa9e9417c99b1e8f925e5e606bec96 100644 (file)
@@ -182,10 +182,10 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
 #ifdef SL_INCLUDE_CSLIP
        cbuff = xchg(&sl->cbuff, cbuff);
        slcomp = xchg(&sl->slcomp, slcomp);
+#endif
 #ifdef CONFIG_SLIP_MODE_SLIP6
        sl->xdata    = 0;
        sl->xbits    = 0;
-#endif
 #endif
        spin_unlock_bh(&sl->lock);
        err = 0;
index 4685127319669eadb65aa596ffed7ce8caa3b506..9a21ca3873fce6d0dbc455b6cda5025db9be6d59 100644 (file)
@@ -879,7 +879,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
        txptr = db->tx_remove_ptr;
        while(db->tx_packet_cnt) {
                tdes0 = le32_to_cpu(txptr->tdes0);
-               pr_debug("tdes0=%x\n", tdes0);
                if (tdes0 & 0x80000000)
                        break;
 
@@ -889,7 +888,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 
                /* Transmit statistic counter */
                if ( tdes0 != 0x7fffffff ) {
-                       pr_debug("tdes0=%x\n", tdes0);
                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
                        if (tdes0 & TDES0_ERR_MASK) {
@@ -986,7 +984,6 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
                        /* error summary bit check */
                        if (rdes0 & 0x8000) {
                                /* This is a error packet */
-                               pr_debug("rdes0: %x\n", rdes0);
                                dev->stats.rx_errors++;
                                if (rdes0 & 1)
                                        dev->stats.rx_fifo_errors++;
@@ -1638,7 +1635,6 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
                else                            /* DM9102/DM9102A */
                        phy_mode = phy_read(db->ioaddr,
                                    db->phy_addr, 17, db->chip_id) & 0xf000;
-               pr_debug("Phy_mode %x\n", phy_mode);
                switch (phy_mode) {
                case 0x1000: db->op_mode = DMFE_10MHF; break;
                case 0x2000: db->op_mode = DMFE_10MFD; break;
index 387ca43f26f4c3942e098702b55d39dbca325a90..304fe78ff60e3b287b608fb4b19bafacb915e315 100644 (file)
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 
        remove_net_device(hso_net->parent);
 
-       if (hso_net->net) {
+       if (hso_net->net)
                unregister_netdev(hso_net->net);
-               free_netdev(hso_net->net);
-       }
 
        /* start freeing */
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
        kfree(hso_net->mux_bulk_tx_buf);
        hso_net->mux_bulk_tx_buf = NULL;
 
+       if (hso_net->net)
+               free_netdev(hso_net->net);
+
        kfree(hso_dev);
 }
 
index 296c316a83412eaa64bd4c6bd47b264dc4910822..f2c0c236392f2663f970de02ecd8f0a7bd34f9d9 100644 (file)
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-       struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ath5k_softc *sc = hw->priv;
 
        ath5k_led_off(sc);
        return 0;
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
 static int ath5k_pci_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct ath5k_softc *sc = pci_get_drvdata(pdev);
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ath5k_softc *sc = hw->priv;
 
        /*
         * Suspend/Resume resets the PCI configuration space, so we have to
index 929c68cdf8ab498dc1f35e7cb5b20b08be861e47..a073cdce1f156fd3a1cfc1acc73992b38e87a257 100644 (file)
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        return snprintf(buf, PAGE_SIZE, "%d\n", get);                   \
 }                                                                      \
                                                                        \
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev,            \
                        struct device_attribute *attr,                  \
                        const char *buf, size_t count)                  \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        int val;                                                        \
                                                                        \
        val = (int)simple_strtoul(buf, NULL, 10);                       \
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        return snprintf(buf, PAGE_SIZE, "%d\n", get);                   \
 }                                                                      \
 static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
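
Both ath5k hunks follow from one convention change: the PCI drvdata now stores the struct ieee80211_hw rather than the softc, so every consumer takes an extra hw->priv hop. A sketch of the accessor pattern (helper name is illustrative):

    static struct ath5k_softc *example_softc(struct device *dev)
    {
            /* drvdata holds the ieee80211_hw; the softc is its priv */
            struct ieee80211_hw *hw = dev_get_drvdata(dev);

            return hw->priv;
    }
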
index 3779b8977d4709a9ce68d85a8b2458d472deaea4..33443bcaa8d9b27c6b3a7d70be54a4e470c38efd 100644 (file)
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
         * TODO - this could be improved to be dependent on the rate.
         *      The hardware can keep up at lower rates, but not higher rates
         */
-       if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+       if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+           !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;
 
        /*
index 2fb53d0675124d90b0c7ba82c9183af2437b9a9a..333b69ef2ae23b792de0dc8f85b1050821e92e3c 100644 (file)
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
        { USB_DEVICE(0x04bb, 0x093f) },
        /* NEC WL300NU-G */
        { USB_DEVICE(0x0409, 0x0249) },
+       /* NEC WL300NU-AG */
+       { USB_DEVICE(0x0409, 0x02b4) },
        /* AVM FRITZ!WLAN USB Stick N */
        { USB_DEVICE(0x057c, 0x8401) },
        /* AVM FRITZ!WLAN USB Stick N 2.4 */
index 092e342c19df75c352be98a177255a659640e378..942f7a3969a79ceb857904053a689b2421f90fe0 100644 (file)
@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+       {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
        {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        /* HP - Lite-On ,8188CUS Slim Combo */
index 2a20dabec76d722d1c311aad9e1e34a13382b6d2..d6620ad309ce9489f35358cf6644bd20b8e96888 100644 (file)
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
+       ssb_pcicore_fix_sprom_core_index(pc);
+
        /* Disable PCI interrupts. */
        ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+       /* Additional PCIe workarounds, executed only once */
+       if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+               ssb_pcicore_serdes_workaround(pc);
+               /* TODO: ASPM */
+               /* TODO: Clock Request Update */
+       }
 }
 
 void ssb_pcicore_init(struct ssb_pcicore *pc)
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
        if (!ssb_device_is_enabled(dev))
                ssb_device_enable(dev, 0);
 
-       ssb_pcicore_fix_sprom_core_index(pc);
-
 #ifdef CONFIG_SSB_PCICORE_HOSTMODE
        pc->hostmode = pcicore_is_in_hostmode(pc);
        if (pc->hostmode)
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
        if (!pc->hostmode)
                ssb_pcicore_init_clientmode(pc);
-
-       /* Additional PCIe always once-executed workarounds */
-       if (dev->id.coreid == SSB_DEV_PCIE) {
-               ssb_pcicore_serdes_workaround(pc);
-               /* TODO: ASPM */
-               /* TODO: Clock Request Update */
-       }
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
index 9536d386bb38ea890a9d5fa78921b5f0bb9fa46a..21d816e9dfa51d7166d61c9b2272192040b56ad3 100644 (file)
@@ -599,8 +599,7 @@ config IT87_WDT
 
 config HP_WATCHDOG
        tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
-       depends on X86
-       default m
+       depends on X86 && PCI
        help
          A software monitoring watchdog and NMI sourcing driver. This driver
          will detect lockups and provide a stack trace. This is a driver that
index 52d7eca8c7bfe9d599e60cd0918b36c7f5fefe42..502b9e98867949736b03d5bbf55b14b7d67205e7 100644 (file)
@@ -34,6 +34,9 @@ struct btrfs_inode {
         */
        struct btrfs_key location;
 
+       /* Lock for counters */
+       spinlock_t lock;
+
        /* the extent_tree has caches of all the extent mappings to disk */
        struct extent_map_tree extent_tree;
 
@@ -134,8 +137,8 @@ struct btrfs_inode {
         * items we think we'll end up using, and reserved_extents is the number
         * of extent items we've reserved metadata for.
         */
-       atomic_t outstanding_extents;
-       atomic_t reserved_extents;
+       unsigned outstanding_extents;
+       unsigned reserved_extents;
 
        /*
         * ordered_data_close is set by truncate when a file that used
@@ -184,4 +187,13 @@ static inline void btrfs_i_size_write(struct inode *inode, u64 size)
        BTRFS_I(inode)->disk_i_size = size;
 }
 
+static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
+                                      struct inode *inode)
+{
+       if (root == root->fs_info->tree_root ||
+           BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+               return true;
+       return false;
+}
+
 #endif
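
The switch from atomic_t to plain unsigned for outstanding_extents/reserved_extents is what the new spinlock is for: with both counters updated under BTRFS_I(inode)->lock, compound updates of the pair become atomic, which atomic_t alone could not provide. A minimal sketch of the pattern the rest of the series uses (hypothetical helper name):

    static void example_bump_outstanding(struct inode *inode)
    {
            /* plain unsigned counters: all updates go under ->lock */
            spin_lock(&BTRFS_I(inode)->lock);
            BTRFS_I(inode)->outstanding_extents++;
            spin_unlock(&BTRFS_I(inode)->lock);
    }
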
index 2e667868e0d2b8b75649572d10602ac56679a0d0..011cab3aca8d9ffeba2690f642badec7e36471c9 100644 (file)
@@ -54,8 +54,13 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
 {
        int i;
        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-               if (p->nodes[i] && p->locks[i])
-                       btrfs_set_lock_blocking(p->nodes[i]);
+               if (!p->nodes[i] || !p->locks[i])
+                       continue;
+               btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
+               if (p->locks[i] == BTRFS_READ_LOCK)
+                       p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+               else if (p->locks[i] == BTRFS_WRITE_LOCK)
+                       p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
        }
 }
 
@@ -68,7 +73,7 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
  * for held
  */
 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
-                                       struct extent_buffer *held)
+                                       struct extent_buffer *held, int held_rw)
 {
        int i;
 
@@ -79,19 +84,29 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
         * really sure by forcing the path to blocking before we clear
         * the path blocking.
         */
-       if (held)
-               btrfs_set_lock_blocking(held);
+       if (held) {
+               btrfs_set_lock_blocking_rw(held, held_rw);
+               if (held_rw == BTRFS_WRITE_LOCK)
+                       held_rw = BTRFS_WRITE_LOCK_BLOCKING;
+               else if (held_rw == BTRFS_READ_LOCK)
+                       held_rw = BTRFS_READ_LOCK_BLOCKING;
+       }
        btrfs_set_path_blocking(p);
 #endif
 
        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
-               if (p->nodes[i] && p->locks[i])
-                       btrfs_clear_lock_blocking(p->nodes[i]);
+               if (p->nodes[i] && p->locks[i]) {
+                       btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
+                       if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
+                               p->locks[i] = BTRFS_WRITE_LOCK;
+                       else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
+                               p->locks[i] = BTRFS_READ_LOCK;
+               }
        }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (held)
-               btrfs_clear_lock_blocking(held);
+               btrfs_clear_lock_blocking_rw(held, held_rw);
 #endif
 }
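
The pairing in btrfs_set_path_blocking()/btrfs_clear_path_blocking() above is easiest to read as a small state machine over path->locks[i]. A sketch (the constants are the real ones from this series; the helper name is illustrative):

    /* path->locks[i] now records the lock flavour, not just 0/1:
     *   BTRFS_WRITE_LOCK <-> BTRFS_WRITE_LOCK_BLOCKING
     *   BTRFS_READ_LOCK  <-> BTRFS_READ_LOCK_BLOCKING
     * set_path_blocking moves right, clear_path_blocking moves back */
    static int example_to_blocking(int rw)
    {
            if (rw == BTRFS_WRITE_LOCK)
                    return BTRFS_WRITE_LOCK_BLOCKING;
            if (rw == BTRFS_READ_LOCK)
                    return BTRFS_READ_LOCK_BLOCKING;
            return rw;      /* already blocking, or not locked (0) */
    }
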
 
@@ -119,7 +134,7 @@ noinline void btrfs_release_path(struct btrfs_path *p)
                if (!p->nodes[i])
                        continue;
                if (p->locks[i]) {
-                       btrfs_tree_unlock(p->nodes[i]);
+                       btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
                        p->locks[i] = 0;
                }
                free_extent_buffer(p->nodes[i]);
@@ -167,6 +182,25 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
        return eb;
 }
 
+/* loop around taking references on and locking the root node of the
+ * tree until you end up with a lock on the root.  A locked buffer
+ * is returned, with a reference held.
+ */
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+{
+       struct extent_buffer *eb;
+
+       while (1) {
+               eb = btrfs_root_node(root);
+               btrfs_tree_read_lock(eb);
+               if (eb == root->node)
+                       break;
+               btrfs_tree_read_unlock(eb);
+               free_extent_buffer(eb);
+       }
+       return eb;
+}
+
 /* cowonly root (everything not a reference counted cow subvolume), just get
  * put onto a simple dirty list.  transaction.c walks this to make sure they
  * get properly updated on disk.
@@ -626,14 +660,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
        for (i = start_slot; i < end_slot; i++) {
                int close = 1;
 
-               if (!parent->map_token) {
-                       map_extent_buffer(parent,
-                                       btrfs_node_key_ptr_offset(i),
-                                       sizeof(struct btrfs_key_ptr),
-                                       &parent->map_token, &parent->kaddr,
-                                       &parent->map_start, &parent->map_len,
-                                       KM_USER1);
-               }
                btrfs_node_key(parent, &disk_key, i);
                if (!progress_passed && comp_keys(&disk_key, progress) < 0)
                        continue;
@@ -656,11 +682,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                        last_block = blocknr;
                        continue;
                }
-               if (parent->map_token) {
-                       unmap_extent_buffer(parent, parent->map_token,
-                                           KM_USER1);
-                       parent->map_token = NULL;
-               }
 
                cur = btrfs_find_tree_block(root, blocknr, blocksize);
                if (cur)
@@ -701,11 +722,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                btrfs_tree_unlock(cur);
                free_extent_buffer(cur);
        }
-       if (parent->map_token) {
-               unmap_extent_buffer(parent, parent->map_token,
-                                   KM_USER1);
-               parent->map_token = NULL;
-       }
        return err;
 }
 
@@ -746,7 +762,6 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
        struct btrfs_disk_key *tmp = NULL;
        struct btrfs_disk_key unaligned;
        unsigned long offset;
-       char *map_token = NULL;
        char *kaddr = NULL;
        unsigned long map_start = 0;
        unsigned long map_len = 0;
@@ -756,18 +771,13 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
                mid = (low + high) / 2;
                offset = p + mid * item_size;
 
-               if (!map_token || offset < map_start ||
+               if (!kaddr || offset < map_start ||
                    (offset + sizeof(struct btrfs_disk_key)) >
                    map_start + map_len) {
-                       if (map_token) {
-                               unmap_extent_buffer(eb, map_token, KM_USER0);
-                               map_token = NULL;
-                       }
 
                        err = map_private_extent_buffer(eb, offset,
                                                sizeof(struct btrfs_disk_key),
-                                               &map_token, &kaddr,
-                                               &map_start, &map_len, KM_USER0);
+                                               &kaddr, &map_start, &map_len);
 
                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
@@ -790,14 +800,10 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
                        high = mid;
                else {
                        *slot = mid;
-                       if (map_token)
-                               unmap_extent_buffer(eb, map_token, KM_USER0);
                        return 0;
                }
        }
        *slot = low;
-       if (map_token)
-               unmap_extent_buffer(eb, map_token, KM_USER0);
        return 1;
 }
 
@@ -890,7 +896,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
        mid = path->nodes[level];
 
-       WARN_ON(!path->locks[level]);
+       WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
+               path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
        WARN_ON(btrfs_header_generation(mid) != trans->transid);
 
        orig_ptr = btrfs_node_blockptr(mid, orig_slot);
@@ -1228,7 +1235,6 @@ static void reada_for_search(struct btrfs_root *root,
        u32 nr;
        u32 blocksize;
        u32 nscan = 0;
-       bool map = true;
 
        if (level != 1)
                return;
@@ -1250,19 +1256,8 @@ static void reada_for_search(struct btrfs_root *root,
 
        nritems = btrfs_header_nritems(node);
        nr = slot;
-       if (node->map_token || path->skip_locking)
-               map = false;
 
        while (1) {
-               if (map && !node->map_token) {
-                       unsigned long offset = btrfs_node_key_ptr_offset(nr);
-                       map_private_extent_buffer(node, offset,
-                                                 sizeof(struct btrfs_key_ptr),
-                                                 &node->map_token,
-                                                 &node->kaddr,
-                                                 &node->map_start,
-                                                 &node->map_len, KM_USER1);
-               }
                if (direction < 0) {
                        if (nr == 0)
                                break;
@@ -1281,11 +1276,6 @@ static void reada_for_search(struct btrfs_root *root,
                if ((search <= target && target - search <= 65536) ||
                    (search > target && search - target <= 65536)) {
                        gen = btrfs_node_ptr_generation(node, nr);
-                       if (map && node->map_token) {
-                               unmap_extent_buffer(node, node->map_token,
-                                                   KM_USER1);
-                               node->map_token = NULL;
-                       }
                        readahead_tree_block(root, search, blocksize, gen);
                        nread += blocksize;
                }
@@ -1293,10 +1283,6 @@ static void reada_for_search(struct btrfs_root *root,
                if ((nread > 65536 || nscan > 32))
                        break;
        }
-       if (map && node->map_token) {
-               unmap_extent_buffer(node, node->map_token, KM_USER1);
-               node->map_token = NULL;
-       }
 }
 
 /*
@@ -1409,7 +1395,7 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
 
                t = path->nodes[i];
                if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
-                       btrfs_tree_unlock(t);
+                       btrfs_tree_unlock_rw(t, path->locks[i]);
                        path->locks[i] = 0;
                }
        }
@@ -1436,7 +1422,7 @@ noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
                        continue;
                if (!path->locks[i])
                        continue;
-               btrfs_tree_unlock(path->nodes[i]);
+               btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
                path->locks[i] = 0;
        }
 }
@@ -1485,6 +1471,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
                         * we can trust our generation number
                         */
                        free_extent_buffer(tmp);
+                       btrfs_set_path_blocking(p);
+
                        tmp = read_tree_block(root, blocknr, blocksize, gen);
                        if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
                                *eb_ret = tmp;
@@ -1540,20 +1528,27 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 static int
 setup_nodes_for_search(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *p,
-                      struct extent_buffer *b, int level, int ins_len)
+                      struct extent_buffer *b, int level, int ins_len,
+                      int *write_lock_level)
 {
        int ret;
        if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
            BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
                int sret;
 
+               if (*write_lock_level < level + 1) {
+                       *write_lock_level = level + 1;
+                       btrfs_release_path(p);
+                       goto again;
+               }
+
                sret = reada_for_balance(root, p, level);
                if (sret)
                        goto again;
 
                btrfs_set_path_blocking(p);
                sret = split_node(trans, root, p, level);
-               btrfs_clear_path_blocking(p, NULL);
+               btrfs_clear_path_blocking(p, NULL, 0);
 
                BUG_ON(sret > 0);
                if (sret) {
@@ -1565,13 +1560,19 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
                   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
                int sret;
 
+               if (*write_lock_level < level + 1) {
+                       *write_lock_level = level + 1;
+                       btrfs_release_path(p);
+                       goto again;
+               }
+
                sret = reada_for_balance(root, p, level);
                if (sret)
                        goto again;
 
                btrfs_set_path_blocking(p);
                sret = balance_level(trans, root, p, level);
-               btrfs_clear_path_blocking(p, NULL);
+               btrfs_clear_path_blocking(p, NULL, 0);
 
                if (sret) {
                        ret = sret;
@@ -1615,27 +1616,78 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
        int err;
        int level;
        int lowest_unlock = 1;
+       int root_lock;
+       /* everything at write_lock_level or lower must be write locked */
+       int write_lock_level = 0;
        u8 lowest_level = 0;
 
        lowest_level = p->lowest_level;
        WARN_ON(lowest_level && ins_len > 0);
        WARN_ON(p->nodes[0] != NULL);
 
-       if (ins_len < 0)
+       if (ins_len < 0) {
                lowest_unlock = 2;
 
+               /* when we are removing items, we might have to go up to level
+                * two as we update tree pointers.  Make sure we keep write
+                * locks on those levels as well
+                */
+               write_lock_level = 2;
+       } else if (ins_len > 0) {
+               /*
+                * for inserting items, make sure we have a write lock on
+                * level 1 so we can update keys
+                */
+               write_lock_level = 1;
+       }
+
+       if (!cow)
+               write_lock_level = -1;
+
+       if (cow && (p->keep_locks || p->lowest_level))
+               write_lock_level = BTRFS_MAX_LEVEL;
+
 again:
+       /*
+        * we try very hard to do read locks on the root
+        */
+       root_lock = BTRFS_READ_LOCK;
+       level = 0;
        if (p->search_commit_root) {
+               /*
+                * the commit roots are read only
+                * so we always do read locks
+                */
                b = root->commit_root;
                extent_buffer_get(b);
+               level = btrfs_header_level(b);
                if (!p->skip_locking)
-                       btrfs_tree_lock(b);
+                       btrfs_tree_read_lock(b);
        } else {
-               if (p->skip_locking)
+               if (p->skip_locking) {
                        b = btrfs_root_node(root);
-               else
-                       b = btrfs_lock_root_node(root);
+                       level = btrfs_header_level(b);
+               } else {
+                       /* we don't know the level of the root node
+                        * until we actually have it read locked
+                        */
+                       b = btrfs_read_lock_root_node(root);
+                       level = btrfs_header_level(b);
+                       if (level <= write_lock_level) {
+                               /* whoops, must trade for write lock */
+                               btrfs_tree_read_unlock(b);
+                               free_extent_buffer(b);
+                               b = btrfs_lock_root_node(root);
+                               root_lock = BTRFS_WRITE_LOCK;
+
+                               /* the level might have changed, check again */
+                               level = btrfs_header_level(b);
+                       }
+               }
        }
+       p->nodes[level] = b;
+       if (!p->skip_locking)
+               p->locks[level] = root_lock;
 
        while (b) {
                level = btrfs_header_level(b);
@@ -1644,10 +1696,6 @@ again:
                 * setup the path here so we can release it under lock
                 * contention with the cow code
                 */
-               p->nodes[level] = b;
-               if (!p->skip_locking)
-                       p->locks[level] = 1;
-
                if (cow) {
                        /*
                         * if we don't really need to cow this block
@@ -1659,6 +1707,16 @@ again:
 
                        btrfs_set_path_blocking(p);
 
+                       /*
+                        * must have write locks on this node and the
+                        * parent
+                        */
+                       if (level + 1 > write_lock_level) {
+                               write_lock_level = level + 1;
+                               btrfs_release_path(p);
+                               goto again;
+                       }
+
                        err = btrfs_cow_block(trans, root, b,
                                              p->nodes[level + 1],
                                              p->slots[level + 1], &b);
@@ -1671,10 +1729,7 @@ cow_done:
                BUG_ON(!cow && ins_len);
 
                p->nodes[level] = b;
-               if (!p->skip_locking)
-                       p->locks[level] = 1;
-
-               btrfs_clear_path_blocking(p, NULL);
+               btrfs_clear_path_blocking(p, NULL, 0);
 
                /*
                 * we have a lock on b and as long as we aren't changing
@@ -1700,7 +1755,7 @@ cow_done:
                        }
                        p->slots[level] = slot;
                        err = setup_nodes_for_search(trans, root, p, b, level,
-                                                    ins_len);
+                                            ins_len, &write_lock_level);
                        if (err == -EAGAIN)
                                goto again;
                        if (err) {
@@ -1710,6 +1765,19 @@ cow_done:
                        b = p->nodes[level];
                        slot = p->slots[level];
 
+                       /*
+                        * slot 0 is special: if we change the key
+                        * we have to update the parent pointer
+                        * which means we must have a write lock
+                        * on the parent
+                        */
+                       if (slot == 0 && cow &&
+                           write_lock_level < level + 1) {
+                               write_lock_level = level + 1;
+                               btrfs_release_path(p);
+                               goto again;
+                       }
+
                        unlock_up(p, level, lowest_unlock);
 
                        if (level == lowest_level) {
@@ -1728,23 +1796,42 @@ cow_done:
                        }
 
                        if (!p->skip_locking) {
-                               btrfs_clear_path_blocking(p, NULL);
-                               err = btrfs_try_spin_lock(b);
-
-                               if (!err) {
-                                       btrfs_set_path_blocking(p);
-                                       btrfs_tree_lock(b);
-                                       btrfs_clear_path_blocking(p, b);
+                               level = btrfs_header_level(b);
+                               if (level <= write_lock_level) {
+                                       err = btrfs_try_tree_write_lock(b);
+                                       if (!err) {
+                                               btrfs_set_path_blocking(p);
+                                               btrfs_tree_lock(b);
+                                               btrfs_clear_path_blocking(p, b,
+                                                                 BTRFS_WRITE_LOCK);
+                                       }
+                                       p->locks[level] = BTRFS_WRITE_LOCK;
+                               } else {
+                                       err = btrfs_try_tree_read_lock(b);
+                                       if (!err) {
+                                               btrfs_set_path_blocking(p);
+                                               btrfs_tree_read_lock(b);
+                                               btrfs_clear_path_blocking(p, b,
+                                                                 BTRFS_READ_LOCK);
+                                       }
+                                       p->locks[level] = BTRFS_READ_LOCK;
                                }
+                               p->nodes[level] = b;
                        }
                } else {
                        p->slots[level] = slot;
                        if (ins_len > 0 &&
                            btrfs_leaf_free_space(root, b) < ins_len) {
+                               if (write_lock_level < 1) {
+                                       write_lock_level = 1;
+                                       btrfs_release_path(p);
+                                       goto again;
+                               }
+
                                btrfs_set_path_blocking(p);
                                err = split_leaf(trans, root, key,
                                                 p, ins_len, ret == 0);
-                               btrfs_clear_path_blocking(p, NULL);
+                               btrfs_clear_path_blocking(p, NULL, 0);
 
                                BUG_ON(err > 0);
                                if (err) {
@@ -2025,7 +2112,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        add_root_to_dirty_list(root);
        extent_buffer_get(c);
        path->nodes[level] = c;
-       path->locks[level] = 1;
+       path->locks[level] = BTRFS_WRITE_LOCK;
        path->slots[level] = 0;
        return 0;
 }
@@ -2253,14 +2340,6 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
                if (path->slots[0] == i)
                        push_space += data_size;
 
-               if (!left->map_token) {
-                       map_extent_buffer(left, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &left->map_token, &left->kaddr,
-                                       &left->map_start, &left->map_len,
-                                       KM_USER1);
-               }
-
                this_item_size = btrfs_item_size(left, item);
                if (this_item_size + sizeof(*item) + push_space > free_space)
                        break;
@@ -2271,10 +2350,6 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
                        break;
                i--;
        }
-       if (left->map_token) {
-               unmap_extent_buffer(left, left->map_token, KM_USER1);
-               left->map_token = NULL;
-       }
 
        if (push_items == 0)
                goto out_unlock;
@@ -2316,21 +2391,10 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
        push_space = BTRFS_LEAF_DATA_SIZE(root);
        for (i = 0; i < right_nritems; i++) {
                item = btrfs_item_nr(right, i);
-               if (!right->map_token) {
-                       map_extent_buffer(right, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &right->map_token, &right->kaddr,
-                                       &right->map_start, &right->map_len,
-                                       KM_USER1);
-               }
                push_space -= btrfs_item_size(right, item);
                btrfs_set_item_offset(right, item, push_space);
        }
 
-       if (right->map_token) {
-               unmap_extent_buffer(right, right->map_token, KM_USER1);
-               right->map_token = NULL;
-       }
        left_nritems -= push_items;
        btrfs_set_header_nritems(left, left_nritems);
 
@@ -2467,13 +2531,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 
        for (i = 0; i < nr; i++) {
                item = btrfs_item_nr(right, i);
-               if (!right->map_token) {
-                       map_extent_buffer(right, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &right->map_token, &right->kaddr,
-                                       &right->map_start, &right->map_len,
-                                       KM_USER1);
-               }
 
                if (!empty && push_items > 0) {
                        if (path->slots[0] < i)
@@ -2496,11 +2553,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
                push_space += this_item_size + sizeof(*item);
        }
 
-       if (right->map_token) {
-               unmap_extent_buffer(right, right->map_token, KM_USER1);
-               right->map_token = NULL;
-       }
-
        if (push_items == 0) {
                ret = 1;
                goto out;
@@ -2530,23 +2582,12 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
                u32 ioff;
 
                item = btrfs_item_nr(left, i);
-               if (!left->map_token) {
-                       map_extent_buffer(left, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &left->map_token, &left->kaddr,
-                                       &left->map_start, &left->map_len,
-                                       KM_USER1);
-               }
 
                ioff = btrfs_item_offset(left, item);
                btrfs_set_item_offset(left, item,
                      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
        }
        btrfs_set_header_nritems(left, old_left_nritems + push_items);
-       if (left->map_token) {
-               unmap_extent_buffer(left, left->map_token, KM_USER1);
-               left->map_token = NULL;
-       }
 
        /* fixup right node */
        if (push_items > right_nritems) {
@@ -2574,21 +2615,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
        for (i = 0; i < right_nritems; i++) {
                item = btrfs_item_nr(right, i);
 
-               if (!right->map_token) {
-                       map_extent_buffer(right, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &right->map_token, &right->kaddr,
-                                       &right->map_start, &right->map_len,
-                                       KM_USER1);
-               }
-
                push_space = push_space - btrfs_item_size(right, item);
                btrfs_set_item_offset(right, item, push_space);
        }
-       if (right->map_token) {
-               unmap_extent_buffer(right, right->map_token, KM_USER1);
-               right->map_token = NULL;
-       }
 
        btrfs_mark_buffer_dirty(left);
        if (right_nritems)
@@ -2729,23 +2758,10 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
                struct btrfs_item *item = btrfs_item_nr(right, i);
                u32 ioff;
 
-               if (!right->map_token) {
-                       map_extent_buffer(right, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &right->map_token, &right->kaddr,
-                                       &right->map_start, &right->map_len,
-                                       KM_USER1);
-               }
-
                ioff = btrfs_item_offset(right, item);
                btrfs_set_item_offset(right, item, ioff + rt_data_off);
        }
 
-       if (right->map_token) {
-               unmap_extent_buffer(right, right->map_token, KM_USER1);
-               right->map_token = NULL;
-       }
-
        btrfs_set_header_nritems(l, mid);
        ret = 0;
        btrfs_item_key(right, &disk_key, 0);
@@ -3264,23 +3280,10 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
                u32 ioff;
                item = btrfs_item_nr(leaf, i);
 
-               if (!leaf->map_token) {
-                       map_extent_buffer(leaf, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &leaf->map_token, &leaf->kaddr,
-                                       &leaf->map_start, &leaf->map_len,
-                                       KM_USER1);
-               }
-
                ioff = btrfs_item_offset(leaf, item);
                btrfs_set_item_offset(leaf, item, ioff + size_diff);
        }
 
-       if (leaf->map_token) {
-               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-               leaf->map_token = NULL;
-       }
-
        /* shift the data */
        if (from_end) {
                memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
@@ -3377,22 +3380,10 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
                u32 ioff;
                item = btrfs_item_nr(leaf, i);
 
-               if (!leaf->map_token) {
-                       map_extent_buffer(leaf, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &leaf->map_token, &leaf->kaddr,
-                                       &leaf->map_start, &leaf->map_len,
-                                       KM_USER1);
-               }
                ioff = btrfs_item_offset(leaf, item);
                btrfs_set_item_offset(leaf, item, ioff - data_size);
        }
 
-       if (leaf->map_token) {
-               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-               leaf->map_token = NULL;
-       }
-
        /* shift the data */
        memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
                      data_end - data_size, btrfs_leaf_data(leaf) +
@@ -3494,27 +3485,13 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
                 * item0..itemN ... dataN.offset..dataN.size .. data0.size
                 */
                /* first correct the data pointers */
-               WARN_ON(leaf->map_token);
                for (i = slot; i < nritems; i++) {
                        u32 ioff;
 
                        item = btrfs_item_nr(leaf, i);
-                       if (!leaf->map_token) {
-                               map_extent_buffer(leaf, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &leaf->map_token, &leaf->kaddr,
-                                       &leaf->map_start, &leaf->map_len,
-                                       KM_USER1);
-                       }
-
                        ioff = btrfs_item_offset(leaf, item);
                        btrfs_set_item_offset(leaf, item, ioff - total_data);
                }
-               if (leaf->map_token) {
-                       unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-                       leaf->map_token = NULL;
-               }
-
                /* shift the items */
                memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
                              btrfs_item_nr_offset(slot),
@@ -3608,27 +3585,13 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
                 * item0..itemN ... dataN.offset..dataN.size .. data0.size
                 */
                /* first correct the data pointers */
-               WARN_ON(leaf->map_token);
                for (i = slot; i < nritems; i++) {
                        u32 ioff;
 
                        item = btrfs_item_nr(leaf, i);
-                       if (!leaf->map_token) {
-                               map_extent_buffer(leaf, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &leaf->map_token, &leaf->kaddr,
-                                       &leaf->map_start, &leaf->map_len,
-                                       KM_USER1);
-                       }
-
                        ioff = btrfs_item_offset(leaf, item);
                        btrfs_set_item_offset(leaf, item, ioff - total_data);
                }
-               if (leaf->map_token) {
-                       unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-                       leaf->map_token = NULL;
-               }
-
                /* shift the items */
                memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
                              btrfs_item_nr_offset(slot),
@@ -3840,22 +3803,10 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                        u32 ioff;
 
                        item = btrfs_item_nr(leaf, i);
-                       if (!leaf->map_token) {
-                               map_extent_buffer(leaf, (unsigned long)item,
-                                       sizeof(struct btrfs_item),
-                                       &leaf->map_token, &leaf->kaddr,
-                                       &leaf->map_start, &leaf->map_len,
-                                       KM_USER1);
-                       }
                        ioff = btrfs_item_offset(leaf, item);
                        btrfs_set_item_offset(leaf, item, ioff + dsize);
                }
 
-               if (leaf->map_token) {
-                       unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-                       leaf->map_token = NULL;
-               }
-
                memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
                              btrfs_item_nr_offset(slot + nr),
                              sizeof(struct btrfs_item) *
@@ -4004,11 +3955,11 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 
        WARN_ON(!path->keep_locks);
 again:
-       cur = btrfs_lock_root_node(root);
+       cur = btrfs_read_lock_root_node(root);
        level = btrfs_header_level(cur);
        WARN_ON(path->nodes[level]);
        path->nodes[level] = cur;
-       path->locks[level] = 1;
+       path->locks[level] = BTRFS_READ_LOCK;
 
        if (btrfs_header_generation(cur) < min_trans) {
                ret = 1;
@@ -4098,12 +4049,12 @@ find_next_key:
                cur = read_node_slot(root, cur, slot);
                BUG_ON(!cur);
 
-               btrfs_tree_lock(cur);
+               btrfs_tree_read_lock(cur);
 
-               path->locks[level - 1] = 1;
+               path->locks[level - 1] = BTRFS_READ_LOCK;
                path->nodes[level - 1] = cur;
                unlock_up(path, level, 1);
-               btrfs_clear_path_blocking(path, NULL);
+               btrfs_clear_path_blocking(path, NULL, 0);
        }
 out:
        if (ret == 0)
@@ -4218,30 +4169,21 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
        u32 nritems;
        int ret;
        int old_spinning = path->leave_spinning;
-       int force_blocking = 0;
+       int next_rw_lock = 0;
 
        nritems = btrfs_header_nritems(path->nodes[0]);
        if (nritems == 0)
                return 1;
 
-       /*
-        * we take the blocks in an order that upsets lockdep.  Using
-        * blocking mode is the only way around it.
-        */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       force_blocking = 1;
-#endif
-
        btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
 again:
        level = 1;
        next = NULL;
+       next_rw_lock = 0;
        btrfs_release_path(path);
 
        path->keep_locks = 1;
-
-       if (!force_blocking)
-               path->leave_spinning = 1;
+       path->leave_spinning = 1;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        path->keep_locks = 0;
@@ -4281,11 +4223,12 @@ again:
                }
 
                if (next) {
-                       btrfs_tree_unlock(next);
+                       btrfs_tree_unlock_rw(next, next_rw_lock);
                        free_extent_buffer(next);
                }
 
                next = c;
+               next_rw_lock = path->locks[level];
                ret = read_block_for_search(NULL, root, path, &next, level,
                                            slot, &key);
                if (ret == -EAGAIN)
@@ -4297,15 +4240,14 @@ again:
                }
 
                if (!path->skip_locking) {
-                       ret = btrfs_try_spin_lock(next);
+                       ret = btrfs_try_tree_read_lock(next);
                        if (!ret) {
                                btrfs_set_path_blocking(path);
-                               btrfs_tree_lock(next);
-                               if (!force_blocking)
-                                       btrfs_clear_path_blocking(path, next);
+                               btrfs_tree_read_lock(next);
+                               btrfs_clear_path_blocking(path, next,
+                                                         BTRFS_READ_LOCK);
                        }
-                       if (force_blocking)
-                               btrfs_set_lock_blocking(next);
+                       next_rw_lock = BTRFS_READ_LOCK;
                }
                break;
        }
@@ -4314,14 +4256,13 @@ again:
                level--;
                c = path->nodes[level];
                if (path->locks[level])
-                       btrfs_tree_unlock(c);
+                       btrfs_tree_unlock_rw(c, path->locks[level]);
 
                free_extent_buffer(c);
                path->nodes[level] = next;
                path->slots[level] = 0;
                if (!path->skip_locking)
-                       path->locks[level] = 1;
-
+                       path->locks[level] = next_rw_lock;
                if (!level)
                        break;
 
@@ -4336,16 +4277,14 @@ again:
                }
 
                if (!path->skip_locking) {
-                       btrfs_assert_tree_locked(path->nodes[level]);
-                       ret = btrfs_try_spin_lock(next);
+                       ret = btrfs_try_tree_read_lock(next);
                        if (!ret) {
                                btrfs_set_path_blocking(path);
-                               btrfs_tree_lock(next);
-                               if (!force_blocking)
-                                       btrfs_clear_path_blocking(path, next);
+                               btrfs_tree_read_lock(next);
+                               btrfs_clear_path_blocking(path, next,
+                                                         BTRFS_READ_LOCK);
                        }
-                       if (force_blocking)
-                               btrfs_set_lock_blocking(next);
+                       next_rw_lock = BTRFS_READ_LOCK;
                }
        }
        ret = 0;
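
The hunk above replaces the old always-write locking in btrfs_next_leaf with read locks, and remembers the mode taken in next_rw_lock so the eventual unlock can be issued in the matching mode. A minimal userspace sketch of that per-level bookkeeping; the BTRFS_READ_LOCK/BTRFS_WRITE_LOCK values and the node type here are illustrative stand-ins, not the kernel's:

#include <assert.h>
#include <stdio.h>

/* Illustrative lock-mode values; only the names mirror the diff. */
#define BTRFS_READ_LOCK  1
#define BTRFS_WRITE_LOCK 2

struct node {
        int readers;
        int writers;
};

static void lock_rw(struct node *n, int rw)
{
        if (rw == BTRFS_READ_LOCK)
                n->readers++;
        else
                n->writers++;
}

/* The unlock must be told which mode was taken, hence path->locks[level]. */
static void unlock_rw(struct node *n, int rw)
{
        if (rw == BTRFS_READ_LOCK)
                n->readers--;
        else
                n->writers--;
}

int main(void)
{
        struct node n = { 0, 0 };
        int next_rw_lock = 0;           /* 0 means "not locked yet" */

        lock_rw(&n, BTRFS_READ_LOCK);
        next_rw_lock = BTRFS_READ_LOCK; /* remember which mode we took */

        if (next_rw_lock)
                unlock_rw(&n, next_rw_lock); /* release with matching mode */

        assert(n.readers == 0 && n.writers == 0);
        printf("locks balanced\n");
        return 0;
}
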
index 3b859a3e6a0e9354a653e324f8c08ba85d45f959..3be57c611040ffd279bae8377f673a5b467f23e9 100644 (file)
@@ -755,6 +755,8 @@ struct btrfs_space_info {
                                   chunks for this space */
        unsigned int chunk_alloc:1;     /* set if we are allocating a chunk */
 
+       unsigned int flush:1;           /* set if we are trying to make space */
+
        unsigned int force_alloc;       /* set if we need to force a chunk
                                           alloc for this space */
 
@@ -764,7 +766,7 @@ struct btrfs_space_info {
        struct list_head block_groups[BTRFS_NR_RAID_TYPES];
        spinlock_t lock;
        struct rw_semaphore groups_sem;
-       atomic_t caching_threads;
+       wait_queue_head_t wait;
 };
 
 struct btrfs_block_rsv {
@@ -824,6 +826,7 @@ struct btrfs_caching_control {
        struct list_head list;
        struct mutex mutex;
        wait_queue_head_t wait;
+       struct btrfs_work work;
        struct btrfs_block_group_cache *block_group;
        u64 progress;
        atomic_t count;
@@ -1032,6 +1035,8 @@ struct btrfs_fs_info {
        struct btrfs_workers endio_write_workers;
        struct btrfs_workers endio_freespace_worker;
        struct btrfs_workers submit_workers;
+       struct btrfs_workers caching_workers;
+
        /*
         * fixup workers take dirty pages that didn't properly go through
         * the cow mechanism and make them safe to write.  It happens
@@ -2128,7 +2133,7 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 
 /* extent-tree.c */
 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
-                                                int num_items)
+                                                unsigned num_items)
 {
        return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
                3 * num_items;
@@ -2222,9 +2227,6 @@ void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
-int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root,
-                               int num_items);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root);
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
@@ -2330,7 +2332,7 @@ struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
 void btrfs_set_path_blocking(struct btrfs_path *p);
 void btrfs_clear_path_blocking(struct btrfs_path *p,
-                              struct extent_buffer *held);
+                              struct extent_buffer *held, int held_rw);
 void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
index 98c68e658a9b2eb08a8ba99d0b5691f16fc8a67d..b52c672f4c180beae28f60302071731bd343dcba 100644 (file)
@@ -735,7 +735,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
        }
 
        /* reset all the locked nodes in the path to spinning locks. */
-       btrfs_clear_path_blocking(path, NULL);
+       btrfs_clear_path_blocking(path, NULL, 0);
 
        /* insert the keys of the items */
        ret = setup_items_for_insert(trans, root, path, keys, data_size,
index 685f2593c4f049559a087cec8b88daa04a0d357d..c360a848d97fb6115c58a41bb45aeef611c4c86f 100644 (file)
@@ -89,13 +89,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
        data_size = sizeof(*dir_item) + name_len + data_len;
        dir_item = insert_with_overflow(trans, root, path, &key, data_size,
                                        name, name_len);
-       /*
-        * FIXME: at some point we should handle xattr's that are larger than
-        * what we can fit in our leaf.  We set location to NULL b/c we arent
-        * pointing at anything else, that will change if we store the xattr
-        * data in a separate inode.
-        */
-       BUG_ON(IS_ERR(dir_item));
+       if (IS_ERR(dir_item))
+               return PTR_ERR(dir_item);
        memset(&location, 0, sizeof(location));
 
        leaf = path->nodes[0];
index 1ac8db5dc0a31b9a742099956b121cd75ba0a1a6..94ecac33cf2d924ab55cff813b774c04acd285a9 100644 (file)
@@ -100,38 +100,83 @@ struct async_submit_bio {
        struct btrfs_work work;
 };
 
-/* These are used to set the lockdep class on the extent buffer locks.
- * The class is set by the readpage_end_io_hook after the buffer has
- * passed csum validation but before the pages are unlocked.
+/*
+ * Lockdep class keys for extent_buffer->lock in this root.  For a given
+ * eb, the lockdep key is determined by the btrfs_root it belongs to and
+ * the level the eb occupies in the tree.
+ *
+ * Different roots are used for different purposes and may nest inside each
+ * other, so they require separate keysets.  As lockdep keys should be
+ * static, assign keysets according to the purpose of the root as indicated
+ * by btrfs_root->objectid.  This ensures that all special purpose roots
+ * have separate keysets.
  *
- * The lockdep class is also set by btrfs_init_new_buffer on freshly
- * allocated blocks.
+ * Lock-nesting across peer nodes is always done with the immediate parent
+ * node locked thus preventing deadlock.  As lockdep doesn't know this, use
+ * subclass to avoid triggering lockdep warning in such cases.
  *
- * The class is based on the level in the tree block, which allows lockdep
- * to know that lower nodes nest inside the locks of higher nodes.
+ * The key is set by the readpage_end_io_hook after the buffer has passed
+ * csum validation but before the pages are unlocked.  It is also set by
+ * btrfs_init_new_buffer on freshly allocated blocks.
  *
- * We also add a check to make sure the highest level of the tree is
- * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
- * code needs update as well.
+ * We also add a check to make sure the highest level of the tree is the
+ * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
+ * needs update as well.
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # if BTRFS_MAX_LEVEL != 8
 #  error
 # endif
-static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
-static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
-       /* leaf */
-       "btrfs-extent-00",
-       "btrfs-extent-01",
-       "btrfs-extent-02",
-       "btrfs-extent-03",
-       "btrfs-extent-04",
-       "btrfs-extent-05",
-       "btrfs-extent-06",
-       "btrfs-extent-07",
-       /* highest possible level */
-       "btrfs-extent-08",
+
+static struct btrfs_lockdep_keyset {
+       u64                     id;             /* root objectid */
+       const char              *name_stem;     /* lock name stem */
+       char                    names[BTRFS_MAX_LEVEL + 1][20];
+       struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
+} btrfs_lockdep_keysets[] = {
+       { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
+       { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
+       { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
+       { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
+       { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
+       { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
+       { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
+       { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
+       { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
+       { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
+       { .id = 0,                              .name_stem = "tree"     },
 };
+
+void __init btrfs_init_lockdep(void)
+{
+       int i, j;
+
+       /* initialize lockdep class names */
+       for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
+               struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
+
+               for (j = 0; j < ARRAY_SIZE(ks->names); j++)
+                       snprintf(ks->names[j], sizeof(ks->names[j]),
+                                "btrfs-%s-%02d", ks->name_stem, j);
+       }
+}
+
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
+                                   int level)
+{
+       struct btrfs_lockdep_keyset *ks;
+
+       BUG_ON(level >= ARRAY_SIZE(ks->keys));
+
+       /* find the matching keyset, id 0 is the default entry */
+       for (ks = btrfs_lockdep_keysets; ks->id; ks++)
+               if (ks->id == objectid)
+                       break;
+
+       lockdep_set_class_and_name(&eb->lock,
+                                  &ks->keys[level], ks->names[level]);
+}
+
 #endif
 
 /*
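
btrfs_set_buffer_lockdep_class above relies on the table's final entry having id 0, so the scan always stops at the default keyset when no objectid matches. A standalone sketch of that sentinel-terminated lookup plus the btrfs-%s-%02d naming from btrfs_init_lockdep; the table contents here are invented:

#include <stdio.h>

/* Sentinel-terminated keyset table, modeled on the diff; ids are made up. */
struct keyset {
        unsigned long long id;
        const char *name_stem;
};

static struct keyset keysets[] = {
        { .id = 1, .name_stem = "root"   },
        { .id = 2, .name_stem = "extent" },
        { .id = 0, .name_stem = "tree"   },     /* id 0: default entry */
};

static const struct keyset *find_keyset(unsigned long long objectid)
{
        const struct keyset *ks;

        /* Stop at a match or at the id == 0 default, never past the end. */
        for (ks = keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;
        return ks;
}

int main(void)
{
        char name[20];

        /* Same "btrfs-%s-%02d" scheme as btrfs_init_lockdep(). */
        snprintf(name, sizeof(name), "btrfs-%s-%02d",
                 find_keyset(2)->name_stem, 3);
        printf("%s\n", name);                        /* btrfs-extent-03 */
        printf("%s\n", find_keyset(99)->name_stem);  /* falls back: tree */
        return 0;
}
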
@@ -217,7 +262,6 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
-       char *map_token = NULL;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
@@ -228,8 +272,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
-                                       &map_token, &kaddr,
-                                       &map_start, &map_len, KM_USER0);
+                                       &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
@@ -237,7 +280,6 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
-               unmap_extent_buffer(buf, map_token, KM_USER0);
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
@@ -494,15 +536,6 @@ static noinline int check_leaf(struct btrfs_root *root,
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
-{
-       lockdep_set_class_and_name(&eb->lock,
-                          &btrfs_eb_class[level],
-                          btrfs_eb_name[level]);
-}
-#endif
-
 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
 {
@@ -553,7 +586,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        }
        found_level = btrfs_header_level(eb);
 
-       btrfs_set_buffer_lockdep_class(eb, found_level);
+       btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
+                                      eb, found_level);
 
        ret = csum_tree_block(root, eb, 1);
        if (ret) {
@@ -1603,7 +1637,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                goto fail_bdi;
        }
 
-       fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS;
+       mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
 
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
        INIT_LIST_HEAD(&fs_info->trans_list);
@@ -1807,6 +1841,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                           fs_info->thread_pool_size),
                           &fs_info->generic_worker);
 
+       btrfs_init_workers(&fs_info->caching_workers, "cache",
+                          2, &fs_info->generic_worker);
+
        /* a higher idle thresh on the submit workers makes it much more
         * likely that bios will be sent down in a sane order to the
         * devices
@@ -1860,6 +1897,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        btrfs_start_workers(&fs_info->endio_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
        btrfs_start_workers(&fs_info->delayed_workers, 1);
+       btrfs_start_workers(&fs_info->caching_workers, 1);
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2117,6 +2155,7 @@ fail_sb_buffer:
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
+       btrfs_stop_workers(&fs_info->caching_workers);
 fail_alloc:
        kfree(fs_info->delayed_root);
 fail_iput:
@@ -2584,6 +2623,7 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
+       btrfs_stop_workers(&fs_info->caching_workers);
 
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
index a0b610a67aaeadf1f564d03e218305997c021a85..bec3ea4bd67fd465a8a5e008fcae1e7353f573ad 100644 (file)
@@ -87,10 +87,14 @@ int btree_lock_page_hook(struct page *page);
 
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
+void btrfs_init_lockdep(void);
+void btrfs_set_buffer_lockdep_class(u64 objectid,
+                                   struct extent_buffer *eb, int level);
 #else
-static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
-                                                int level)
+static inline void btrfs_init_lockdep(void)
+{ }
+static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
+                                       struct extent_buffer *eb, int level)
 {
 }
 #endif
index 6bce721e7bbc9fe6b5c12a6f92513b8fcbdb4455..55bddffede73806b0005f80ba217b87c5cf33c5e 100644 (file)
@@ -320,12 +320,12 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
        return total_added;
 }
 
-static int caching_kthread(void *data)
+static noinline void caching_thread(struct btrfs_work *work)
 {
-       struct btrfs_block_group_cache *block_group = data;
-       struct btrfs_fs_info *fs_info = block_group->fs_info;
-       struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
-       struct btrfs_root *extent_root = fs_info->extent_root;
+       struct btrfs_block_group_cache *block_group;
+       struct btrfs_fs_info *fs_info;
+       struct btrfs_caching_control *caching_ctl;
+       struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
@@ -334,9 +334,14 @@ static int caching_kthread(void *data)
        u32 nritems;
        int ret = 0;
 
+       caching_ctl = container_of(work, struct btrfs_caching_control, work);
+       block_group = caching_ctl->block_group;
+       fs_info = block_group->fs_info;
+       extent_root = fs_info->extent_root;
+
        path = btrfs_alloc_path();
        if (!path)
-               return -ENOMEM;
+               goto out;
 
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
@@ -433,13 +438,11 @@ err:
        free_excluded_extents(extent_root, block_group);
 
        mutex_unlock(&caching_ctl->mutex);
+out:
        wake_up(&caching_ctl->wait);
 
        put_caching_control(caching_ctl);
-       atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);
-
-       return 0;
 }
 
 static int cache_block_group(struct btrfs_block_group_cache *cache,
@@ -449,7 +452,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 {
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
-       struct task_struct *tsk;
        int ret = 0;
 
        smp_mb();
@@ -501,6 +503,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        caching_ctl->progress = cache->key.objectid;
        /* one for the caching work item, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);
+       caching_ctl->work.func = caching_thread;
 
        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
@@ -516,16 +519,9 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);
 
-       atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);
 
-       tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
-                         cache->key.objectid);
-       if (IS_ERR(tsk)) {
-               ret = PTR_ERR(tsk);
-               printk(KERN_ERR "error running thread %d\n", ret);
-               BUG();
-       }
+       btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
 
        return ret;
 }
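
Per-block-group kthreads become queued work items above: cache_block_group fills in caching_ctl->work.func, the pool later calls it, and container_of recovers the caching control from the embedded work struct. A tiny model of that dispatch; the work/queue types here are invented stand-ins for btrfs_work and btrfs_queue_worker:

#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for struct btrfs_work and btrfs_queue_worker(). */
struct work {
        void (*func)(struct work *w);
};

static void queue_worker(struct work *w)
{
        w->func(w);     /* a real pool would run this on a worker thread */
}

struct caching_control {
        struct work work;
        const char *block_group;
};

/* container_of analogue: recover the outer struct from its work member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void caching_thread(struct work *w)
{
        struct caching_control *ctl =
                container_of(w, struct caching_control, work);

        printf("caching block group %s\n", ctl->block_group);
}

int main(void)
{
        struct caching_control ctl = { .block_group = "bg-1" };

        ctl.work.func = caching_thread;         /* as in cache_block_group */
        queue_worker(&ctl.work);
        return 0;
}
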
@@ -2934,9 +2930,10 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        found->full = 0;
        found->force_alloc = CHUNK_ALLOC_NO_FORCE;
        found->chunk_alloc = 0;
+       found->flush = 0;
+       init_waitqueue_head(&found->wait);
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
-       atomic_set(&found->caching_threads, 0);
        return 0;
 }
 
@@ -3320,6 +3317,14 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        if (reserved == 0)
                return 0;
 
+       smp_mb();
+       if (root->fs_info->delalloc_bytes == 0) {
+               if (trans)
+                       return 0;
+               btrfs_wait_ordered_extents(root, 0, 0);
+               return 0;
+       }
+
        max_reclaim = min(reserved, to_reclaim);
 
        while (loops < 1024) {
@@ -3362,6 +3367,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                }
 
        }
+       if (reclaimed >= to_reclaim && !trans)
+               btrfs_wait_ordered_extents(root, 0, 0);
        return reclaimed >= to_reclaim;
 }
 
@@ -3386,15 +3393,36 @@ static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
        u64 num_bytes = orig_bytes;
        int retries = 0;
        int ret = 0;
-       bool reserved = false;
        bool committed = false;
+       bool flushing = false;
 
 again:
-       ret = -ENOSPC;
-       if (reserved)
-               num_bytes = 0;
-
+       ret = 0;
        spin_lock(&space_info->lock);
+       /*
+        * We only want to wait if somebody other than us is flushing and we
+        * are actually allowed to flush.
+        */
+       while (flush && !flushing && space_info->flush) {
+               spin_unlock(&space_info->lock);
+               /*
+                * If we have a trans handle we can't wait because the flusher
+                * may have to commit the transaction, which would mean we would
+                * deadlock since we are waiting for the flusher to finish, but
+                * hold the current transaction open.
+                */
+               if (trans)
+                       return -EAGAIN;
+               ret = wait_event_interruptible(space_info->wait,
+                                              !space_info->flush);
+               /* Must have been interrupted, return */
+               if (ret)
+                       return -EINTR;
+
+               spin_lock(&space_info->lock);
+       }
+
+       ret = -ENOSPC;
        unused = space_info->bytes_used + space_info->bytes_reserved +
                 space_info->bytes_pinned + space_info->bytes_readonly +
                 space_info->bytes_may_use;
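
The flush bit plus waitqueue introduced above let exactly one task flush a space_info while latecomers sleep until wake_up_all. A userspace analogue of that handoff, using a pthread condition variable in place of the kernel waitqueue; all names are illustrative:

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for space_info: one flag, one wait channel. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
static int flushing;

static void reserve(int id)
{
        pthread_mutex_lock(&lock);
        while (flushing)                       /* somebody else is flushing */
                pthread_cond_wait(&wait_q, &lock);
        flushing = 1;                          /* we become the flusher */
        pthread_mutex_unlock(&lock);

        printf("task %d flushing\n", id);      /* do the actual flush work */

        pthread_mutex_lock(&lock);
        flushing = 0;
        pthread_cond_broadcast(&wait_q);       /* wake_up_all() analogue */
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        reserve((int)(long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[3];

        for (long i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        return 0;
}
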
@@ -3409,8 +3437,7 @@ again:
        if (unused <= space_info->total_bytes) {
                unused = space_info->total_bytes - unused;
                if (unused >= num_bytes) {
-                       if (!reserved)
-                               space_info->bytes_reserved += orig_bytes;
+                       space_info->bytes_reserved += orig_bytes;
                        ret = 0;
                } else {
                        /*
@@ -3435,17 +3462,14 @@ again:
         * to reclaim space we can actually use it instead of somebody else
         * stealing it from us.
         */
-       if (ret && !reserved) {
-               space_info->bytes_reserved += orig_bytes;
-               reserved = true;
+       if (ret && flush) {
+               flushing = true;
+               space_info->flush = 1;
        }
 
        spin_unlock(&space_info->lock);
 
-       if (!ret)
-               return 0;
-
-       if (!flush)
+       if (!ret || !flush)
                goto out;
 
        /*
@@ -3453,11 +3477,11 @@ again:
         * metadata until after the IO is completed.
         */
        ret = shrink_delalloc(trans, root, num_bytes, 1);
-       if (ret > 0)
-               return 0;
-       else if (ret < 0)
+       if (ret < 0)
                goto out;
 
+       ret = 0;
+
        /*
         * So if we were overcommitted it's possible that somebody else flushed
         * out enough space and we simply didn't have enough space to reclaim,
@@ -3468,11 +3492,11 @@ again:
                goto again;
        }
 
-       spin_lock(&space_info->lock);
        /*
         * Not enough space to be reclaimed, don't bother committing the
         * transaction.
         */
+       spin_lock(&space_info->lock);
        if (space_info->bytes_pinned < orig_bytes)
                ret = -ENOSPC;
        spin_unlock(&space_info->lock);
@@ -3480,10 +3504,13 @@ again:
                goto out;
 
        ret = -EAGAIN;
-       if (trans || committed)
+       if (trans)
                goto out;
 
        ret = -ENOSPC;
+       if (committed)
+               goto out;
+
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                goto out;
@@ -3495,12 +3522,12 @@ again:
        }
 
 out:
-       if (reserved) {
+       if (flushing) {
                spin_lock(&space_info->lock);
-               space_info->bytes_reserved -= orig_bytes;
+               space_info->flush = 0;
+               wake_up_all(&space_info->wait);
                spin_unlock(&space_info->lock);
        }
-
        return ret;
 }
 
@@ -3710,7 +3737,6 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
        if (commit_trans) {
                if (trans)
                        return -EAGAIN;
-
                trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
                ret = btrfs_commit_transaction(trans, root);
@@ -3880,26 +3906,6 @@ int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
-                                struct btrfs_root *root,
-                                int num_items)
-{
-       u64 num_bytes;
-       int ret;
-
-       if (num_items == 0 || root->fs_info->chunk_root == root)
-               return 0;
-
-       num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
-       ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
-                                 num_bytes);
-       if (!ret) {
-               trans->bytes_reserved += num_bytes;
-               trans->block_rsv = &root->fs_info->trans_block_rsv;
-       }
-       return ret;
-}
-
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
 {
@@ -3950,6 +3956,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
+static unsigned drop_outstanding_extent(struct inode *inode)
+{
+       unsigned dropped_extents = 0;
+
+       spin_lock(&BTRFS_I(inode)->lock);
+       BUG_ON(!BTRFS_I(inode)->outstanding_extents);
+       BTRFS_I(inode)->outstanding_extents--;
+
+       /*
+        * If we have at least as many outstanding extents as we have
+        * reserved, then we need to leave the reserved extents count alone.
+        */
+       if (BTRFS_I(inode)->outstanding_extents >=
+           BTRFS_I(inode)->reserved_extents)
+               goto out;
+
+       dropped_extents = BTRFS_I(inode)->reserved_extents -
+               BTRFS_I(inode)->outstanding_extents;
+       BTRFS_I(inode)->reserved_extents -= dropped_extents;
+out:
+       spin_unlock(&BTRFS_I(inode)->lock);
+       return dropped_extents;
+}
+
 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
 {
        return num_bytes >>= 3;
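
drop_outstanding_extent above decrements the outstanding count and only releases reservations once outstanding falls below reserved, returning how many were freed so the caller can size the block-rsv release. A self-contained model of that accounting; the struct is a stand-in for the counters kept under BTRFS_I(inode)->lock:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the inode counters manipulated under BTRFS_I(inode)->lock. */
struct inode_counters {
        unsigned outstanding_extents;
        unsigned reserved_extents;
};

/* Returns how many reserved extents were released, as in the diff. */
static unsigned drop_outstanding_extent(struct inode_counters *i)
{
        unsigned dropped = 0;

        assert(i->outstanding_extents);
        i->outstanding_extents--;

        /* Only release reservations once outstanding drops below reserved. */
        if (i->outstanding_extents < i->reserved_extents) {
                dropped = i->reserved_extents - i->outstanding_extents;
                i->reserved_extents -= dropped;
        }
        return dropped;
}

int main(void)
{
        struct inode_counters i = { .outstanding_extents = 3,
                                    .reserved_extents   = 2 };

        /* 3 -> 2 outstanding: still >= reserved, nothing released. */
        printf("dropped %u\n", drop_outstanding_extent(&i));    /* 0 */
        /* 2 -> 1 outstanding: one reservation can go. */
        printf("dropped %u\n", drop_outstanding_extent(&i));    /* 1 */
        return 0;
}
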
@@ -3959,9 +3989,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
-       u64 to_reserve;
-       int nr_extents;
-       int reserved_extents;
+       u64 to_reserve = 0;
+       unsigned nr_extents = 0;
        int ret;
 
        if (btrfs_transaction_in_commit(root->fs_info))
@@ -3969,66 +3998,49 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
 
-       nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
-       reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+       spin_lock(&BTRFS_I(inode)->lock);
+       BTRFS_I(inode)->outstanding_extents++;
+
+       if (BTRFS_I(inode)->outstanding_extents >
+           BTRFS_I(inode)->reserved_extents) {
+               nr_extents = BTRFS_I(inode)->outstanding_extents -
+                       BTRFS_I(inode)->reserved_extents;
+               BTRFS_I(inode)->reserved_extents += nr_extents;
 
-       if (nr_extents > reserved_extents) {
-               nr_extents -= reserved_extents;
                to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
-       } else {
-               nr_extents = 0;
-               to_reserve = 0;
        }
+       spin_unlock(&BTRFS_I(inode)->lock);
 
        to_reserve += calc_csum_metadata_size(inode, num_bytes);
        ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
-       if (ret)
+       if (ret) {
+               unsigned dropped;
+               /*
+                * We don't need the return value since our reservation failed,
+                * we just need to clean up our counter.
+                */
+               dropped = drop_outstanding_extent(inode);
+               WARN_ON(dropped > 1);
                return ret;
-
-       atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
-       atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+       }
 
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
-       if (block_rsv->size > 512 * 1024 * 1024)
-               shrink_delalloc(NULL, root, to_reserve, 0);
-
        return 0;
 }
 
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 to_free;
-       int nr_extents;
-       int reserved_extents;
+       u64 to_free = 0;
+       unsigned dropped;
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
-       atomic_dec(&BTRFS_I(inode)->outstanding_extents);
-       WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
-
-       reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
-       do {
-               int old, new;
-
-               nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
-               if (nr_extents >= reserved_extents) {
-                       nr_extents = 0;
-                       break;
-               }
-               old = reserved_extents;
-               nr_extents = reserved_extents - nr_extents;
-               new = reserved_extents - nr_extents;
-               old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
-                                    reserved_extents, new);
-               if (likely(old == reserved_extents))
-                       break;
-               reserved_extents = old;
-       } while (1);
+       dropped = drop_outstanding_extent(inode);
 
        to_free = calc_csum_metadata_size(inode, num_bytes);
-       if (nr_extents > 0)
-               to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
+       if (dropped > 0)
+               to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
        btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
                                to_free);
@@ -4996,14 +5008,10 @@ have_block_group:
                        }
 
                        /*
-                        * We only want to start kthread caching if we are at
-                        * the point where we will wait for caching to make
-                        * progress, or if our ideal search is over and we've
-                        * found somebody to start caching.
+                        * The caching workers are limited to 2 threads, so we
+                        * can queue as much work as we care to.
                         */
-                       if (loop > LOOP_CACHING_NOWAIT ||
-                           (loop > LOOP_FIND_IDEAL &&
-                            atomic_read(&space_info->caching_threads) < 2)) {
+                       if (loop > LOOP_FIND_IDEAL) {
                                ret = cache_block_group(block_group, trans,
                                                        orig_root, 0);
                                BUG_ON(ret);
@@ -5225,8 +5233,7 @@ loop:
                if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
                        found_uncached_bg = false;
                        loop++;
-                       if (!ideal_cache_percent &&
-                           atomic_read(&space_info->caching_threads))
+                       if (!ideal_cache_percent)
                                goto search;
 
                        /*
@@ -5630,7 +5637,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
        if (!buf)
                return ERR_PTR(-ENOMEM);
        btrfs_set_header_generation(buf, trans->transid);
-       btrfs_set_buffer_lockdep_class(buf, level);
+       btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
        clean_tree_block(trans, root, buf);
 
@@ -5917,7 +5924,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                        return 1;
 
                if (path->locks[level] && !wc->keep_locks) {
-                       btrfs_tree_unlock(eb);
+                       btrfs_tree_unlock_rw(eb, path->locks[level]);
                        path->locks[level] = 0;
                }
                return 0;
@@ -5941,7 +5948,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
         * keep the tree lock
         */
        if (path->locks[level] && level > 0) {
-               btrfs_tree_unlock(eb);
+               btrfs_tree_unlock_rw(eb, path->locks[level]);
                path->locks[level] = 0;
        }
        return 0;
@@ -6054,7 +6061,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
        BUG_ON(level != btrfs_header_level(next));
        path->nodes[level] = next;
        path->slots[level] = 0;
-       path->locks[level] = 1;
+       path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        wc->level = level;
        if (wc->level == 1)
                wc->reada_slot = 0;
@@ -6125,7 +6132,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                        BUG_ON(level == 0);
                        btrfs_tree_lock(eb);
                        btrfs_set_lock_blocking(eb);
-                       path->locks[level] = 1;
+                       path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
                        ret = btrfs_lookup_extent_info(trans, root,
                                                       eb->start, eb->len,
@@ -6134,8 +6141,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                        BUG_ON(ret);
                        BUG_ON(wc->refs[level] == 0);
                        if (wc->refs[level] == 1) {
-                               btrfs_tree_unlock(eb);
-                               path->locks[level] = 0;
+                               btrfs_tree_unlock_rw(eb, path->locks[level]);
                                return 1;
                        }
                }
@@ -6157,7 +6163,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                    btrfs_header_generation(eb) == trans->transid) {
                        btrfs_tree_lock(eb);
                        btrfs_set_lock_blocking(eb);
-                       path->locks[level] = 1;
+                       path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                }
                clean_tree_block(trans, root, eb);
        }
@@ -6236,7 +6242,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
                                return 0;
 
                        if (path->locks[level]) {
-                               btrfs_tree_unlock(path->nodes[level]);
+                               btrfs_tree_unlock_rw(path->nodes[level],
+                                                    path->locks[level]);
                                path->locks[level] = 0;
                        }
                        free_extent_buffer(path->nodes[level]);
@@ -6292,7 +6299,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
                path->nodes[level] = btrfs_lock_root_node(root);
                btrfs_set_lock_blocking(path->nodes[level]);
                path->slots[level] = 0;
-               path->locks[level] = 1;
+               path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                memset(&wc->update_progress, 0,
                       sizeof(wc->update_progress));
        } else {
@@ -6460,7 +6467,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
        level = btrfs_header_level(node);
        path->nodes[level] = node;
        path->slots[level] = 0;
-       path->locks[level] = 1;
+       path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
        wc->refs[parent_level] = 1;
        wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -6535,15 +6542,28 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
        return flags;
 }
 
-static int set_block_group_ro(struct btrfs_block_group_cache *cache)
+static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
        struct btrfs_space_info *sinfo = cache->space_info;
        u64 num_bytes;
+       u64 min_allocable_bytes;
        int ret = -ENOSPC;
 
        if (cache->ro)
                return 0;
 
+       /*
+        * We need some metadata space and system metadata space free for
+        * allocating chunks in some corner cases, unless we are forced to
+        * mark the block group read-only.
+        */
+       if ((sinfo->flags &
+            (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
+           !force)
+               min_allocable_bytes = 1 * 1024 * 1024;
+       else
+               min_allocable_bytes = 0;
+
        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);
        num_bytes = cache->key.offset - cache->reserved - cache->pinned -
@@ -6551,7 +6571,8 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache)
 
        if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
            sinfo->bytes_may_use + sinfo->bytes_readonly +
-           cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
+           cache->reserved_pinned + num_bytes + min_allocable_bytes <=
+           sinfo->total_bytes) {
                sinfo->bytes_readonly += num_bytes;
                sinfo->bytes_reserved += cache->reserved_pinned;
                cache->reserved_pinned = 0;
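
After this hunk, a metadata or system block group is only marked read-only if at least min_allocable_bytes would still be allocatable afterwards. A standalone check over a simplified subset of the terms in the diff, with invented numbers:

#include <stdio.h>

int main(void)
{
        /* Invented numbers; only the shape of the test matches the diff. */
        unsigned long long total_bytes    = 64ULL << 20;  /* 64 MiB */
        unsigned long long bytes_used     = 40ULL << 20;
        unsigned long long bytes_reserved =  8ULL << 20;
        unsigned long long num_bytes      = 12ULL << 20;  /* unused in bg */
        unsigned long long min_allocable  =  1ULL << 20;  /* 1 MiB floor */

        if (bytes_used + bytes_reserved + num_bytes + min_allocable
            <= total_bytes)
                printf("can set read-only\n");
        else
                printf("would starve the allocator, refuse\n");
        return 0;
}
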
@@ -6582,7 +6603,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
                do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
                               CHUNK_ALLOC_FORCE);
 
-       ret = set_block_group_ro(cache);
+       ret = set_block_group_ro(cache, 0);
        if (!ret)
                goto out;
        alloc_flags = get_alloc_profile(root, cache->space_info->flags);
@@ -6590,7 +6611,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
                             CHUNK_ALLOC_FORCE);
        if (ret < 0)
                goto out;
-       ret = set_block_group_ro(cache);
+       ret = set_block_group_ro(cache, 0);
 out:
        btrfs_end_transaction(trans, root);
        return ret;
@@ -7027,7 +7048,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 
                set_avail_alloc_bits(root->fs_info, cache->flags);
                if (btrfs_chunk_readonly(root, cache->key.objectid))
-                       set_block_group_ro(cache);
+                       set_block_group_ro(cache, 1);
        }
 
        list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
@@ -7041,9 +7062,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                 * mirrored block groups.
                 */
                list_for_each_entry(cache, &space_info->block_groups[3], list)
-                       set_block_group_ro(cache);
+                       set_block_group_ro(cache, 1);
                list_for_each_entry(cache, &space_info->block_groups[4], list)
-                       set_block_group_ro(cache);
+                       set_block_group_ro(cache, 1);
        }
 
        init_global_block_rsv(info);
index 7055d11c1efdd2efef6668b18e0dfcae802a9dd7..5bbdb243bb6f7b95b8abd808806970e5d4ec70eb 100644 (file)
@@ -281,11 +281,10 @@ static int merge_state(struct extent_io_tree *tree,
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        merge_cb(tree, state, other);
-                       other->start = state->start;
-                       state->tree = NULL;
-                       rb_erase(&state->rb_node, &tree->state);
-                       free_extent_state(state);
-                       state = NULL;
+                       state->end = other->end;
+                       other->tree = NULL;
+                       rb_erase(&other->rb_node, &tree->state);
+                       free_extent_state(other);
                }
        }
 
@@ -351,7 +350,6 @@ static int insert_state(struct extent_io_tree *tree,
                       "%llu %llu\n", (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)start, (unsigned long long)end);
-               free_extent_state(state);
                return -EEXIST;
        }
        state->tree = tree;
@@ -500,7 +498,8 @@ again:
                        cached_state = NULL;
                }
 
-               if (cached && cached->tree && cached->start == start) {
+               if (cached && cached->tree && cached->start <= start &&
+                   cached->end > start) {
                        if (clear)
                                atomic_dec(&cached->refs);
                        state = cached;
@@ -742,7 +741,8 @@ again:
        spin_lock(&tree->lock);
        if (cached_state && *cached_state) {
                state = *cached_state;
-               if (state->start == start && state->tree) {
+               if (state->start <= start && state->end > start &&
+                   state->tree) {
                        node = &state->rb_node;
                        goto hit_next;
                }
@@ -783,13 +783,13 @@ hit_next:
                if (err)
                        goto out;
 
-               next_node = rb_next(node);
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;
 
                start = last_end + 1;
+               next_node = rb_next(&state->rb_node);
                if (next_node && start < end && prealloc && !need_resched()) {
                        state = rb_entry(next_node, struct extent_state,
                                         rb_node);
@@ -862,7 +862,6 @@ hit_next:
                 * Avoid to free 'prealloc' if it can be merged with
                 * the later extent.
                 */
-               atomic_inc(&prealloc->refs);
                err = insert_state(tree, prealloc, start, this_end,
                                   &bits);
                BUG_ON(err == -EEXIST);
@@ -872,7 +871,6 @@ hit_next:
                        goto out;
                }
                cache_state(prealloc, cached_state);
-               free_extent_state(prealloc);
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
@@ -1564,7 +1562,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
        int bitset = 0;
 
        spin_lock(&tree->lock);
-       if (cached && cached->tree && cached->start == start)
+       if (cached && cached->tree && cached->start <= start &&
+           cached->end > start)
                node = &cached->rb_node;
        else
                node = tree_search(tree, start);
@@ -2432,6 +2431,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        int scanned = 0;
+       int tag;
 
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
@@ -2442,11 +2442,16 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                scanned = 1;
        }
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               tag = PAGECACHE_TAG_TOWRITE;
+       else
+               tag = PAGECACHE_TAG_DIRTY;
 retry:
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               tag_pages_for_writeback(mapping, index, end);
        while (!done && !nr_to_write_done && (index <= end) &&
-              (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                             PAGECACHE_TAG_DIRTY, min(end - index,
-                                 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+              (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;
 
                scanned = 1;
@@ -3022,8 +3027,15 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
                return NULL;
        eb->start = start;
        eb->len = len;
-       spin_lock_init(&eb->lock);
-       init_waitqueue_head(&eb->lock_wq);
+       rwlock_init(&eb->lock);
+       atomic_set(&eb->write_locks, 0);
+       atomic_set(&eb->read_locks, 0);
+       atomic_set(&eb->blocking_readers, 0);
+       atomic_set(&eb->blocking_writers, 0);
+       atomic_set(&eb->spinning_readers, 0);
+       atomic_set(&eb->spinning_writers, 0);
+       init_waitqueue_head(&eb->write_lock_wq);
+       init_waitqueue_head(&eb->read_lock_wq);
 
 #if LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
@@ -3119,7 +3131,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                i = 0;
        }
        for (; i < num_pages; i++, index++) {
-               p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
+               p = find_or_create_page(mapping, index, GFP_NOFS);
                if (!p) {
                        WARN_ON(1);
                        goto free_eb;
@@ -3266,6 +3278,22 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
        return was_dirty;
 }
 
+static int __eb_straddles_pages(u64 start, u64 len)
+{
+       if (len < PAGE_CACHE_SIZE)
+               return 1;
+       if (start & (PAGE_CACHE_SIZE - 1))
+               return 1;
+       if ((start + len) & (PAGE_CACHE_SIZE - 1))
+               return 1;
+       return 0;
+}
+
+static int eb_straddles_pages(struct extent_buffer *eb)
+{
+       return __eb_straddles_pages(eb->start, eb->len);
+}
+
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
                                struct extent_buffer *eb,
                                struct extent_state **cached_state)
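
__eb_straddles_pages above answers "does this buffer share any page with other data?": anything smaller than a page, or not page-aligned at either end, straddles. A quick exercise of the predicate, assuming a 4096-byte PAGE_CACHE_SIZE:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL          /* assumed page size */

/* Same predicate as __eb_straddles_pages() in the diff. */
static int straddles(unsigned long long start, unsigned long long len)
{
        if (len < PAGE_CACHE_SIZE)
                return 1;
        if (start & (PAGE_CACHE_SIZE - 1))
                return 1;
        if ((start + len) & (PAGE_CACHE_SIZE - 1))
                return 1;
        return 0;
}

int main(void)
{
        /* page-aligned, whole pages: does not straddle */
        printf("%d\n", straddles(4096, 8192));  /* 0 */
        /* smaller than a page: shares a page with neighbours */
        printf("%d\n", straddles(4096, 2048));  /* 1 */
        /* misaligned start */
        printf("%d\n", straddles(1024, 4096));  /* 1 */
        return 0;
}
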
@@ -3277,8 +3305,10 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
        num_pages = num_extent_pages(eb->start, eb->len);
        clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
-       clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-                             cached_state, GFP_NOFS);
+       if (eb_straddles_pages(eb)) {
+               clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+                                     cached_state, GFP_NOFS);
+       }
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if (page)
@@ -3296,8 +3326,10 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 
        num_pages = num_extent_pages(eb->start, eb->len);
 
-       set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-                           NULL, GFP_NOFS);
+       if (eb_straddles_pages(eb)) {
+               set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+                                   NULL, GFP_NOFS);
+       }
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3320,9 +3352,12 @@ int extent_range_uptodate(struct extent_io_tree *tree,
        int uptodate;
        unsigned long index;
 
-       ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
-       if (ret)
-               return 1;
+       if (__eb_straddles_pages(start, end - start + 1)) {
+               ret = test_range_bit(tree, start, end,
+                                    EXTENT_UPTODATE, 1, NULL);
+               if (ret)
+                       return 1;
+       }
        while (start <= end) {
                index = start >> PAGE_CACHE_SHIFT;
                page = find_get_page(tree->mapping, index);
@@ -3350,10 +3385,12 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
        if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
                return 1;
 
-       ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-                          EXTENT_UPTODATE, 1, cached_state);
-       if (ret)
-               return ret;
+       if (eb_straddles_pages(eb)) {
+               ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+                                  EXTENT_UPTODATE, 1, cached_state);
+               if (ret)
+                       return ret;
+       }
 
        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = 0; i < num_pages; i++) {
@@ -3386,9 +3423,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
        if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
                return 0;
 
-       if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-                          EXTENT_UPTODATE, 1, NULL)) {
-               return 0;
+       if (eb_straddles_pages(eb)) {
+               if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+                                  EXTENT_UPTODATE, 1, NULL)) {
+                       return 0;
+               }
        }
 
        if (start) {
@@ -3492,9 +3531,8 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
                page = extent_buffer_page(eb, i);
 
                cur = min(len, (PAGE_CACHE_SIZE - offset));
-               kaddr = kmap_atomic(page, KM_USER1);
+               kaddr = page_address(page);
                memcpy(dst, kaddr + offset, cur);
-               kunmap_atomic(kaddr, KM_USER1);
 
                dst += cur;
                len -= cur;
@@ -3504,9 +3542,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 }
 
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
-                              unsigned long min_len, char **token, char **map,
+                              unsigned long min_len, char **map,
                               unsigned long *map_start,
-                              unsigned long *map_len, int km)
+                              unsigned long *map_len)
 {
        size_t offset = start & (PAGE_CACHE_SIZE - 1);
        char *kaddr;
@@ -3536,42 +3574,12 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
        }
 
        p = extent_buffer_page(eb, i);
-       kaddr = kmap_atomic(p, km);
-       *token = kaddr;
+       kaddr = page_address(p);
        *map = kaddr + offset;
        *map_len = PAGE_CACHE_SIZE - offset;
        return 0;
 }
 
-int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
-                     unsigned long min_len,
-                     char **token, char **map,
-                     unsigned long *map_start,
-                     unsigned long *map_len, int km)
-{
-       int err;
-       int save = 0;
-       if (eb->map_token) {
-               unmap_extent_buffer(eb, eb->map_token, km);
-               eb->map_token = NULL;
-               save = 1;
-       }
-       err = map_private_extent_buffer(eb, start, min_len, token, map,
-                                      map_start, map_len, km);
-       if (!err && save) {
-               eb->map_token = *token;
-               eb->kaddr = *map;
-               eb->map_start = *map_start;
-               eb->map_len = *map_len;
-       }
-       return err;
-}
-
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
-{
-       kunmap_atomic(token, km);
-}
-
 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
                          unsigned long start,
                          unsigned long len)
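
With the atomic-kmap token gone, map_private_extent_buffer() reduces to a range query: it fails with -EINVAL only when the requested min_len would cross a page boundary, and otherwise hands back a direct pointer plus how much of the buffer is contiguously addressable. map_extent_buffer() and unmap_extent_buffer() are deleted outright because there is nothing left to unmap. A hedged sketch of the new interface in use (offset, item_size and buf are illustrative):

        char *kaddr;
        unsigned long map_start, map_len;
        int err;

        err = map_private_extent_buffer(eb, offset, item_size,
                                        &kaddr, &map_start, &map_len);
        if (err) {
                /* range crosses a page: use the copying helper instead */
                read_extent_buffer(eb, buf, offset, item_size);
        } else {
                /* kaddr maps buffer offsets [map_start, map_start + map_len) */
                memcpy(buf, kaddr + (offset - map_start), item_size);
        }
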
@@ -3595,9 +3603,8 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 
                cur = min(len, (PAGE_CACHE_SIZE - offset));
 
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = page_address(page);
                ret = memcmp(ptr, kaddr + offset, cur);
-               kunmap_atomic(kaddr, KM_USER0);
                if (ret)
                        break;
 
@@ -3630,9 +3637,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
                WARN_ON(!PageUptodate(page));
 
                cur = min(len, PAGE_CACHE_SIZE - offset);
-               kaddr = kmap_atomic(page, KM_USER1);
+               kaddr = page_address(page);
                memcpy(kaddr + offset, src, cur);
-               kunmap_atomic(kaddr, KM_USER1);
 
                src += cur;
                len -= cur;
@@ -3661,9 +3667,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
                WARN_ON(!PageUptodate(page));
 
                cur = min(len, PAGE_CACHE_SIZE - offset);
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = page_address(page);
                memset(kaddr + offset, c, cur);
-               kunmap_atomic(kaddr, KM_USER0);
 
                len -= cur;
                offset = 0;
@@ -3694,9 +3699,8 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 
                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
 
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = page_address(page);
                read_extent_buffer(src, kaddr + offset, src_offset, cur);
-               kunmap_atomic(kaddr, KM_USER0);
 
                src_offset += cur;
                len -= cur;
@@ -3709,20 +3713,17 @@ static void move_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
 {
-       char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+       char *dst_kaddr = page_address(dst_page);
        if (dst_page == src_page) {
                memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
        } else {
-               char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+               char *src_kaddr = page_address(src_page);
                char *p = dst_kaddr + dst_off + len;
                char *s = src_kaddr + src_off + len;
 
                while (len--)
                        *--p = *--s;
-
-               kunmap_atomic(src_kaddr, KM_USER1);
        }
-       kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
@@ -3735,20 +3736,17 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
 {
-       char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+       char *dst_kaddr = page_address(dst_page);
        char *src_kaddr;
 
        if (dst_page != src_page) {
-               src_kaddr = kmap_atomic(src_page, KM_USER1);
+               src_kaddr = page_address(src_page);
        } else {
                src_kaddr = dst_kaddr;
                BUG_ON(areas_overlap(src_off, dst_off, len));
        }
 
        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-       kunmap_atomic(dst_kaddr, KM_USER0);
-       if (dst_page != src_page)
-               kunmap_atomic(src_kaddr, KM_USER1);
 }
 
 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
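
move_pages() keeps memmove() semantics through the conversion: the same-page case defers to memmove(), and the cross-page case copies from the tail backwards, the safe direction for the overlapping moves that memmove_extent_buffer() feeds it chunk by chunk. copy_pages() stays a forward memcpy() and instead asserts via BUG_ON() that a same-page source and destination never overlap. A minimal illustration of why copy direction matters under overlap:

        char buf[8] = "abcdef";

        /* dst > src with overlap: a forward byte copy would clobber
         * source bytes before reading them; copying from the tail
         * (what memmove does internally here) is safe */
        memmove(buf + 2, buf, 4);       /* buf now holds "ababcd" */
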
index a11a92ee2d30a84ccf52284e704c5cc971f7126e..21a7ca9e72825752a3b910cdfb9fac444094b96b 100644 (file)
@@ -120,8 +120,6 @@ struct extent_state {
 struct extent_buffer {
        u64 start;
        unsigned long len;
-       char *map_token;
-       char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        struct page *first_page;
@@ -130,14 +128,26 @@ struct extent_buffer {
        struct rcu_head rcu_head;
        atomic_t refs;
 
-       /* the spinlock is used to protect most operations */
-       spinlock_t lock;
+       /* counts of lock holders and waiters on the extent buffer */
+       atomic_t write_locks;
+       atomic_t read_locks;
+       atomic_t blocking_writers;
+       atomic_t blocking_readers;
+       atomic_t spinning_readers;
+       atomic_t spinning_writers;
+
+       /* protects write locks */
+       rwlock_t lock;
 
-       /*
-        * when we keep the lock held while blocking, waiters go onto
-        * the wq
+       /* readers use write_lock_wq while they wait for blocking
+        * write lock holders to unlock
         */
-       wait_queue_head_t lock_wq;
+       wait_queue_head_t write_lock_wq;
+
+       /* writers use read_lock_wq while they wait for readers
+        * to unlock
+        */
+       wait_queue_head_t read_lock_wq;
 };
 
 static inline void extent_set_compress_type(unsigned long *bio_flags,
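
The six counters split lock holders along two axes: reader vs. writer, and spinning (still holding eb->lock) vs. blocking (a logical owner that has dropped eb->lock so it may schedule). A write-side sequence, annotated with the counter effects implemented in the locking.c changes later in this diff:

        btrfs_tree_lock(eb);        /* write_locks++, spinning_writers++ */

        btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
                                    /* spinning_writers--, blocking_writers++,
                                     * write_unlock(&eb->lock): may sleep now */

        btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
                                    /* re-takes write_lock(&eb->lock),
                                     * blocking_writers--, spinning_writers++ */

        btrfs_tree_unlock(eb);      /* write_locks--, spinning_writers--,
                                     * write_unlock(&eb->lock) */
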
@@ -279,15 +289,10 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 int extent_buffer_uptodate(struct extent_io_tree *tree,
                           struct extent_buffer *eb,
                           struct extent_state *cached_state);
-int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
-                     unsigned long min_len, char **token, char **map,
-                     unsigned long *map_start,
-                     unsigned long *map_len, int km);
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
-                     unsigned long min_len, char **token, char **map,
+                     unsigned long min_len, char **map,
                      unsigned long *map_start,
-                     unsigned long *map_len, int km);
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
+                     unsigned long *map_len);
 int extent_range_uptodate(struct extent_io_tree *tree,
                          u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
index f92ff0ed6e03df2c25a638727b0517caa9a8109f..b910694f61ed25bbf0f309ff673381afcf0994d8 100644 (file)
@@ -177,6 +177,15 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 
        WARN_ON(bio->bi_vcnt <= 0);
 
+       /*
+        * the free space stuff is only read when it hasn't been
+        * updated in the current transaction.  So, we can safely
+        * read from the commit root and sidestep a nasty deadlock
+        * between reading the free space cache and updating the csum tree.
+        */
+       if (btrfs_is_free_space_inode(root, inode))
+               path->search_commit_root = 1;
+
        disk_bytenr = (u64)bio->bi_sector << 9;
        if (dio)
                offset = logical_offset;
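
Setting path->search_commit_root makes btrfs_search_slot() descend from root->commit_root, the last committed version of the tree, instead of the live root, so the csum lookups for the free space inode never contend with the transaction that is concurrently rewriting the csum tree on the cache's behalf. A hedged sketch of the flag in use:

        struct btrfs_path *path = btrfs_alloc_path();

        path->search_commit_root = 1;   /* read-only view of the last commit */
        ret = btrfs_search_slot(NULL /* no transaction */,
                                root->fs_info->csum_root, &key, path, 0, 0);
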
@@ -665,10 +674,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
        struct btrfs_sector_sum *sector_sum;
        u32 nritems;
        u32 ins_size;
-       char *eb_map;
-       char *eb_token;
-       unsigned long map_len;
-       unsigned long map_start;
        u16 csum_size =
                btrfs_super_csum_size(&root->fs_info->super_copy);
 
@@ -817,30 +822,9 @@ found:
        item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
                                      btrfs_item_size_nr(leaf, path->slots[0]));
-       eb_token = NULL;
 next_sector:
 
-       if (!eb_token ||
-          (unsigned long)item + csum_size >= map_start + map_len) {
-               int err;
-
-               if (eb_token)
-                       unmap_extent_buffer(leaf, eb_token, KM_USER1);
-               eb_token = NULL;
-               err = map_private_extent_buffer(leaf, (unsigned long)item,
-                                               csum_size,
-                                               &eb_token, &eb_map,
-                                               &map_start, &map_len, KM_USER1);
-               if (err)
-                       eb_token = NULL;
-       }
-       if (eb_token) {
-               memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)),
-                      &sector_sum->sum, csum_size);
-       } else {
-               write_extent_buffer(leaf, &sector_sum->sum,
-                                   (unsigned long)item, csum_size);
-       }
+       write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size);
 
        total_bytes += root->sectorsize;
        sector_sum++;
@@ -853,10 +837,7 @@ next_sector:
                        goto next_sector;
                }
        }
-       if (eb_token) {
-               unmap_extent_buffer(leaf, eb_token, KM_USER1);
-               eb_token = NULL;
-       }
+
        btrfs_mark_buffer_dirty(path->nodes[0]);
        if (total_bytes < sums->len) {
                btrfs_release_path(path);
index 23d1d811e2b2b159eea7eb1c721edd83a69957d7..41ca5fdaee6cf12de9a781fecab721635538386e 100644 (file)
@@ -1082,7 +1082,8 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 
 again:
        for (i = 0; i < num_pages; i++) {
-               pages[i] = grab_cache_page(inode->i_mapping, index + i);
+               pages[i] = find_or_create_page(inode->i_mapping, index + i,
+                                              GFP_NOFS);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
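
grab_cache_page() allocates missing pages with mapping_gfp_mask(mapping), which normally includes __GFP_FS and so can re-enter the filesystem from reclaim while btrfs already holds locks on this path. Spelling the call out as find_or_create_page() lets the caller pass GFP_NOFS explicitly; the same substitution recurs in the free-space-cache, inode, ioctl and relocation hunks below. For reference, grab_cache_page() is essentially this wrapper:

        static inline struct page *grab_cache_page(struct address_space *mapping,
                                                   pgoff_t index)
        {
                return find_or_create_page(mapping, index,
                                           mapping_gfp_mask(mapping));
        }
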
@@ -1239,9 +1240,11 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                 * managed to copy.
                 */
                if (num_pages > dirty_pages) {
-                       if (copied > 0)
-                               atomic_inc(
-                                       &BTRFS_I(inode)->outstanding_extents);
+                       if (copied > 0) {
+                               spin_lock(&BTRFS_I(inode)->lock);
+                               BTRFS_I(inode)->outstanding_extents++;
+                               spin_unlock(&BTRFS_I(inode)->lock);
+                       }
                        btrfs_delalloc_release_space(inode,
                                        (num_pages - dirty_pages) <<
                                        PAGE_CACHE_SHIFT);
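
Throughout this merge, BTRFS_I(inode)->outstanding_extents (and reserved_extents) change from atomics to plain counters guarded by the new per-inode spinlock initialized in btrfs_alloc_inode() below. The likely motivation is that the reservation code can now inspect and adjust related per-inode state together under one lock, which independent atomics cannot do race-free; a hypothetical paired update for illustration:

        spin_lock(&BTRFS_I(inode)->lock);
        /* consistent view of both counters while deciding */
        if (BTRFS_I(inode)->outstanding_extents >
            BTRFS_I(inode)->reserved_extents)
                BTRFS_I(inode)->reserved_extents++;     /* illustrative only */
        BTRFS_I(inode)->outstanding_extents++;
        spin_unlock(&BTRFS_I(inode)->lock);
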
index bf0d61567f3d65a24e9d11802acdd0c8c5abf32b..6377713f639c978db84f64dec2dc9541e8c8287e 100644 (file)
@@ -98,6 +98,12 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
                return inode;
 
        spin_lock(&block_group->lock);
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
+               printk(KERN_INFO "Old style space inode found, converting.\n");
+               BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
+               block_group->disk_cache_state = BTRFS_DC_CLEAR;
+       }
+
        if (!btrfs_fs_closing(root->fs_info)) {
                block_group->inode = igrab(inode);
                block_group->iref = 1;
@@ -135,7 +141,7 @@ int __create_free_space_inode(struct btrfs_root *root,
        btrfs_set_inode_gid(leaf, inode_item, 0);
        btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
        btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
-                             BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
+                             BTRFS_INODE_PREALLOC);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
        btrfs_set_inode_block_group(leaf, inode_item, offset);
@@ -239,17 +245,12 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct page *page;
-       u32 *checksums = NULL, *crc;
-       char *disk_crcs = NULL;
        struct btrfs_key key;
        struct list_head bitmaps;
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
-       u32 cur_crc = ~(u32)0;
        pgoff_t index = 0;
-       unsigned long first_page_offset;
-       int num_checksums;
        int ret = 0;
 
        INIT_LIST_HEAD(&bitmaps);
@@ -292,16 +293,6 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        if (!num_entries)
                goto out;
 
-       /* Setup everything for doing checksumming */
-       num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
-       checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
-       if (!checksums)
-               goto out;
-       first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
-       disk_crcs = kzalloc(first_page_offset, GFP_NOFS);
-       if (!disk_crcs)
-               goto out;
-
        ret = readahead_cache(inode);
        if (ret)
                goto out;
@@ -311,18 +302,12 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                struct btrfs_free_space *e;
                void *addr;
                unsigned long offset = 0;
-               unsigned long start_offset = 0;
                int need_loop = 0;
 
                if (!num_entries && !num_bitmaps)
                        break;
 
-               if (index == 0) {
-                       start_offset = first_page_offset;
-                       offset = start_offset;
-               }
-
-               page = grab_cache_page(inode->i_mapping, index);
+               page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page)
                        goto free_cache;
 
@@ -342,8 +327,15 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                if (index == 0) {
                        u64 *gen;
 
-                       memcpy(disk_crcs, addr, first_page_offset);
-                       gen = addr + (sizeof(u32) * num_checksums);
+                       /*
+                        * We put a bogus crc in the front of the first page in
+                        * case old kernels try to mount a fs with the new
+                        * format to make sure they discard the cache.
+                        */
+                       addr += sizeof(u64);
+                       offset += sizeof(u64);
+
+                       gen = addr;
                        if (*gen != BTRFS_I(inode)->generation) {
                                printk(KERN_ERR "btrfs: space cache generation"
                                       " (%llu) does not match inode (%llu)\n",
@@ -355,24 +347,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                                page_cache_release(page);
                                goto free_cache;
                        }
-                       crc = (u32 *)disk_crcs;
-               }
-               entry = addr + start_offset;
-
-               /* First lets check our crc before we do anything fun */
-               cur_crc = ~(u32)0;
-               cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc,
-                                         PAGE_CACHE_SIZE - start_offset);
-               btrfs_csum_final(cur_crc, (char *)&cur_crc);
-               if (cur_crc != *crc) {
-                       printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
-                              index);
-                       kunmap(page);
-                       unlock_page(page);
-                       page_cache_release(page);
-                       goto free_cache;
+                       addr += sizeof(u64);
+                       offset += sizeof(u64);
                }
-               crc++;
+               entry = addr;
 
                while (1) {
                        if (!num_entries)
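
The per-page checksum array that used to occupy the head of the first cache page is gone. The first page now begins with a deliberately bogus crc, so old kernels (which verify the first page against a stored checksum) reject the cache rather than misparse it, followed by the generation stamp. The loader above walks this layout; as a sketch:

        /* new first-page layout (sketch):
         * [ u64 bogus crc ][ u64 generation ][ entries / bitmaps ... ]
         */
        addr = kmap(page);
        addr += sizeof(u64);    /* skip the bogus crc */
        gen = addr;             /* must match BTRFS_I(inode)->generation */
        addr += sizeof(u64);
        entry = addr;           /* array of free space entries follows */
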
@@ -470,8 +448,6 @@ next:
 
        ret = 1;
 out:
-       kfree(checksums);
-       kfree(disk_crcs);
        return ret;
 free_cache:
        __btrfs_remove_free_space_cache(ctl);
@@ -569,8 +545,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        struct btrfs_key key;
        u64 start, end, len;
        u64 bytes = 0;
-       u32 *crc, *checksums;
-       unsigned long first_page_offset;
+       u32 crc = ~(u32)0;
        int index = 0, num_pages = 0;
        int entries = 0;
        int bitmaps = 0;
@@ -590,34 +565,13 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
 
-       /* Since the first page has all of our checksums and our generation we
-        * need to calculate the offset into the page that we can start writing
-        * our entries.
-        */
-       first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
-
        filemap_write_and_wait(inode->i_mapping);
        btrfs_wait_ordered_range(inode, inode->i_size &
                                 ~(root->sectorsize - 1), (u64)-1);
 
-       /* make sure we don't overflow that first page */
-       if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
-               /* this is really the same as running out of space, where we also return 0 */
-               printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
-               ret = 0;
-               goto out_update;
-       }
-
-       /* We need a checksum per page. */
-       crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
-       if (!crc)
-               return -1;
-
        pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
-       if (!pages) {
-               kfree(crc);
+       if (!pages)
                return -1;
-       }
 
        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
@@ -640,7 +594,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
         * know and don't freak out.
         */
        while (index < num_pages) {
-               page = grab_cache_page(inode->i_mapping, index);
+               page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page) {
                        int i;
 
@@ -648,7 +602,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
-                       goto out_free;
+                       goto out;
                }
                pages[index] = page;
                index++;
@@ -668,17 +622,11 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        /* Write out the extent entries */
        do {
                struct btrfs_free_space_entry *entry;
-               void *addr;
+               void *addr, *orig;
                unsigned long offset = 0;
-               unsigned long start_offset = 0;
 
                next_page = false;
 
-               if (index == 0) {
-                       start_offset = first_page_offset;
-                       offset = start_offset;
-               }
-
                if (index >= num_pages) {
                        out_of_space = true;
                        break;
@@ -686,10 +634,26 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
                page = pages[index];
 
-               addr = kmap(page);
-               entry = addr + start_offset;
+               orig = addr = kmap(page);
+               if (index == 0) {
+                       u64 *gen;
 
-               memset(addr, 0, PAGE_CACHE_SIZE);
+                       /*
+                        * We're going to put in a bogus crc for this page to
+                        * make sure that old kernels that aren't aware of this
+                        * format discard the cache.
+                        */
+                       addr += sizeof(u64);
+                       offset += sizeof(u64);
+
+                       gen = addr;
+                       *gen = trans->transid;
+                       addr += sizeof(u64);
+                       offset += sizeof(u64);
+               }
+               entry = addr;
+
+               memset(addr, 0, PAGE_CACHE_SIZE - offset);
                while (node && !next_page) {
                        struct btrfs_free_space *e;
 
@@ -752,13 +716,19 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                next_page = true;
                        entry++;
                }
-               *crc = ~(u32)0;
-               *crc = btrfs_csum_data(root, addr + start_offset, *crc,
-                                      PAGE_CACHE_SIZE - start_offset);
-               kunmap(page);
 
-               btrfs_csum_final(*crc, (char *)crc);
-               crc++;
+               /* Generate bogus crc value */
+               if (index == 0) {
+                       u32 *tmp;
+                       crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
+                                             PAGE_CACHE_SIZE - sizeof(u64));
+                       btrfs_csum_final(crc, (char *)&crc);
+                       crc++;
+                       tmp = orig;
+                       *tmp = crc;
+               }
+
+               kunmap(page);
 
                bytes += PAGE_CACHE_SIZE;
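
The write side mirrors the loader: it still computes a crc32c over the body of the first page, but increments the result before storing it at offset zero, apparently to guarantee the slot never holds a checksum that could validate. Old kernels checksum the page, see the mismatch, and fall back to rebuilding the cache; new kernels simply skip the slot. Condensed:

        /* sketch of the bogus-crc store in the hunk above */
        crc = btrfs_csum_data(root, orig + sizeof(u64), ~(u32)0,
                              PAGE_CACHE_SIZE - sizeof(u64));
        btrfs_csum_final(crc, (char *)&crc);
        *(u32 *)orig = crc + 1;         /* deliberately not the real crc */
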
 
@@ -779,11 +749,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
                addr = kmap(page);
                memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
-               *crc = ~(u32)0;
-               *crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE);
                kunmap(page);
-               btrfs_csum_final(*crc, (char *)crc);
-               crc++;
                bytes += PAGE_CACHE_SIZE;
 
                list_del_init(&entry->list);
@@ -796,7 +762,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                     i_size_read(inode) - 1, &cached_state,
                                     GFP_NOFS);
                ret = 0;
-               goto out_free;
+               goto out;
        }
 
        /* Zero out the rest of the pages just to make sure */
@@ -811,20 +777,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                index++;
        }
 
-       /* Write the checksums and trans id to the first page */
-       {
-               void *addr;
-               u64 *gen;
-
-               page = pages[0];
-
-               addr = kmap(page);
-               memcpy(addr, checksums, sizeof(u32) * num_pages);
-               gen = addr + (sizeof(u32) * num_pages);
-               *gen = trans->transid;
-               kunmap(page);
-       }
-
        ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
                                            bytes, &cached_state);
        btrfs_drop_pages(pages, num_pages);
@@ -833,7 +785,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
        if (ret) {
                ret = 0;
-               goto out_free;
+               goto out;
        }
 
        BTRFS_I(inode)->generation = trans->transid;
@@ -850,7 +802,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
-               goto out_free;
+               goto out;
        }
        leaf = path->nodes[0];
        if (ret > 0) {
@@ -866,7 +818,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                         EXTENT_DO_ACCOUNTING, 0, 0, NULL,
                                         GFP_NOFS);
                        btrfs_release_path(path);
-                       goto out_free;
+                       goto out;
                }
        }
        header = btrfs_item_ptr(leaf, path->slots[0],
@@ -879,11 +831,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
        ret = 1;
 
-out_free:
-       kfree(checksums);
+out:
        kfree(pages);
-
-out_update:
        if (ret != 1) {
                invalidate_inode_pages2_range(inode->i_mapping, 0, index);
                BTRFS_I(inode)->generation = 0;
index 88829993db6cd816533025af04d591fad4b92034..4360ccb191b12c2f7f2b41e085209f07951fac12 100644 (file)
@@ -750,15 +750,6 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
        return alloc_hint;
 }
 
-static inline bool is_free_space_inode(struct btrfs_root *root,
-                                      struct inode *inode)
-{
-       if (root == root->fs_info->tree_root ||
-           BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
-               return true;
-       return false;
-}
-
 /*
  * when extent_io.c finds a delayed allocation range in the file,
  * the call backs end up in this code.  The basic idea is to
@@ -791,7 +782,7 @@ static noinline int cow_file_range(struct inode *inode,
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;
 
-       BUG_ON(is_free_space_inode(root, inode));
+       BUG_ON(btrfs_is_free_space_inode(root, inode));
        trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@@ -1073,7 +1064,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        if (!path)
                return -ENOMEM;
 
-       nolock = is_free_space_inode(root, inode);
+       nolock = btrfs_is_free_space_inode(root, inode);
 
        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
@@ -1299,7 +1290,9 @@ static int btrfs_split_extent_hook(struct inode *inode,
        if (!(orig->state & EXTENT_DELALLOC))
                return 0;
 
-       atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+       spin_lock(&BTRFS_I(inode)->lock);
+       BTRFS_I(inode)->outstanding_extents++;
+       spin_unlock(&BTRFS_I(inode)->lock);
        return 0;
 }
 
@@ -1317,7 +1310,9 @@ static int btrfs_merge_extent_hook(struct inode *inode,
        if (!(other->state & EXTENT_DELALLOC))
                return 0;
 
-       atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+       spin_lock(&BTRFS_I(inode)->lock);
+       BTRFS_I(inode)->outstanding_extents--;
+       spin_unlock(&BTRFS_I(inode)->lock);
        return 0;
 }
 
@@ -1338,12 +1333,15 @@ static int btrfs_set_bit_hook(struct inode *inode,
        if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                u64 len = state->end + 1 - state->start;
-               bool do_list = !is_free_space_inode(root, inode);
+               bool do_list = !btrfs_is_free_space_inode(root, inode);
 
-               if (*bits & EXTENT_FIRST_DELALLOC)
+               if (*bits & EXTENT_FIRST_DELALLOC) {
                        *bits &= ~EXTENT_FIRST_DELALLOC;
-               else
-                       atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+               } else {
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       BTRFS_I(inode)->outstanding_extents++;
+                       spin_unlock(&BTRFS_I(inode)->lock);
+               }
 
                spin_lock(&root->fs_info->delalloc_lock);
                BTRFS_I(inode)->delalloc_bytes += len;
@@ -1371,12 +1369,15 @@ static int btrfs_clear_bit_hook(struct inode *inode,
        if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                u64 len = state->end + 1 - state->start;
-               bool do_list = !is_free_space_inode(root, inode);
+               bool do_list = !btrfs_is_free_space_inode(root, inode);
 
-               if (*bits & EXTENT_FIRST_DELALLOC)
+               if (*bits & EXTENT_FIRST_DELALLOC) {
                        *bits &= ~EXTENT_FIRST_DELALLOC;
-               else if (!(*bits & EXTENT_DO_ACCOUNTING))
-                       atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+               } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       BTRFS_I(inode)->outstanding_extents--;
+                       spin_unlock(&BTRFS_I(inode)->lock);
+               }
 
                if (*bits & EXTENT_DO_ACCOUNTING)
                        btrfs_delalloc_release_metadata(inode, len);
@@ -1478,7 +1479,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-       if (is_free_space_inode(root, inode))
+       if (btrfs_is_free_space_inode(root, inode))
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
        else
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1728,7 +1729,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                return 0;
        BUG_ON(!ordered_extent);
 
-       nolock = is_free_space_inode(root, inode);
+       nolock = btrfs_is_free_space_inode(root, inode);
 
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                BUG_ON(!list_empty(&ordered_extent->list));
@@ -2535,13 +2536,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
-       if (!leaf->map_token)
-               map_private_extent_buffer(leaf, (unsigned long)inode_item,
-                                         sizeof(struct btrfs_inode_item),
-                                         &leaf->map_token, &leaf->kaddr,
-                                         &leaf->map_start, &leaf->map_len,
-                                         KM_USER1);
-
        inode->i_mode = btrfs_inode_mode(leaf, inode_item);
        inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
        inode->i_uid = btrfs_inode_uid(leaf, inode_item);
@@ -2579,11 +2573,6 @@ cache_acl:
        if (!maybe_acls)
                cache_no_acl(inode);
 
-       if (leaf->map_token) {
-               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-               leaf->map_token = NULL;
-       }
-
        btrfs_free_path(path);
 
        switch (inode->i_mode & S_IFMT) {
@@ -2628,13 +2617,6 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct btrfs_inode_item *item,
                            struct inode *inode)
 {
-       if (!leaf->map_token)
-               map_private_extent_buffer(leaf, (unsigned long)item,
-                                         sizeof(struct btrfs_inode_item),
-                                         &leaf->map_token, &leaf->kaddr,
-                                         &leaf->map_start, &leaf->map_len,
-                                         KM_USER1);
-
        btrfs_set_inode_uid(leaf, item, inode->i_uid);
        btrfs_set_inode_gid(leaf, item, inode->i_gid);
        btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2663,11 +2645,6 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
        btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
        btrfs_set_inode_block_group(leaf, item, 0);
-
-       if (leaf->map_token) {
-               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
-               leaf->map_token = NULL;
-       }
 }
 
 /*
@@ -2688,7 +2665,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
         * The data relocation inode should also be directly updated
         * without delay
         */
-       if (!is_free_space_inode(root, inode)
+       if (!btrfs_is_free_space_inode(root, inode)
            && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
                ret = btrfs_delayed_update_inode(trans, root, inode);
                if (!ret)
@@ -3403,7 +3380,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 
        ret = -ENOMEM;
 again:
-       page = grab_cache_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
                goto out;
@@ -3639,7 +3616,7 @@ void btrfs_evict_inode(struct inode *inode)
 
        truncate_inode_pages(&inode->i_data, 0);
        if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
-                              is_free_space_inode(root, inode)))
+                              btrfs_is_free_space_inode(root, inode)))
                goto no_delete;
 
        if (is_bad_inode(inode)) {
@@ -4293,7 +4270,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
        if (BTRFS_I(inode)->dummy_inode)
                return 0;
 
-       if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
+       if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
                nolock = true;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -6752,8 +6729,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->index_cnt = (u64)-1;
        ei->last_unlink_trans = 0;
 
-       atomic_set(&ei->outstanding_extents, 0);
-       atomic_set(&ei->reserved_extents, 0);
+       spin_lock_init(&ei->lock);
+       ei->outstanding_extents = 0;
+       ei->reserved_extents = 0;
 
        ei->ordered_data_close = 0;
        ei->orphan_meta_reserved = 0;
@@ -6791,8 +6769,8 @@ void btrfs_destroy_inode(struct inode *inode)
 
        WARN_ON(!list_empty(&inode->i_dentry));
        WARN_ON(inode->i_data.nrpages);
-       WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
-       WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
+       WARN_ON(BTRFS_I(inode)->outstanding_extents);
+       WARN_ON(BTRFS_I(inode)->reserved_extents);
 
        /*
         * This can happen where we create an inode, but somebody else also
@@ -6847,7 +6825,7 @@ int btrfs_drop_inode(struct inode *inode)
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
        if (btrfs_root_refs(&root->root_item) == 0 &&
-           !is_free_space_inode(root, inode))
+           !btrfs_is_free_space_inode(root, inode))
                return 1;
        else
                return generic_drop_inode(inode);
index a3c4751e07db0d7704e8ac1aa3269d7c75e3c48a..fd252fff4c6666d19ebc53553c96d17d8567d37a 100644 (file)
@@ -867,8 +867,8 @@ again:
        /* step one, lock all the pages */
        for (i = 0; i < num_pages; i++) {
                struct page *page;
-               page = grab_cache_page(inode->i_mapping,
-                                           start_index + i);
+               page = find_or_create_page(inode->i_mapping,
+                                           start_index + i, GFP_NOFS);
                if (!page)
                        break;
 
@@ -938,7 +938,9 @@ again:
                          GFP_NOFS);
 
        if (i_done != num_pages) {
-               atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+               spin_lock(&BTRFS_I(inode)->lock);
+               BTRFS_I(inode)->outstanding_extents++;
+               spin_unlock(&BTRFS_I(inode)->lock);
                btrfs_delalloc_release_space(inode,
                                     (num_pages - i_done) << PAGE_CACHE_SHIFT);
        }
index 66fa43dc3f0f9ff8b5c67e5330120fd663bf4054..d77b67c4b275731417c11e04ad38b3dc9c4d456b 100644 (file)
 #include "extent_io.h"
 #include "locking.h"
 
-static inline void spin_nested(struct extent_buffer *eb)
-{
-       spin_lock(&eb->lock);
-}
+void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
 
 /*
- * Setting a lock to blocking will drop the spinlock and set the
- * flag that forces other procs who want the lock to wait.  After
- * this you can safely schedule with the lock held.
+ * if we currently have a spinning reader or writer lock
+ * (indicated by the rw flag) this will bump the count
+ * of blocking holders and drop the spinlock.
  */
-void btrfs_set_lock_blocking(struct extent_buffer *eb)
+void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
-               set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
-               spin_unlock(&eb->lock);
+       if (rw == BTRFS_WRITE_LOCK) {
+               if (atomic_read(&eb->blocking_writers) == 0) {
+                       WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+                       atomic_dec(&eb->spinning_writers);
+                       btrfs_assert_tree_locked(eb);
+                       atomic_inc(&eb->blocking_writers);
+                       write_unlock(&eb->lock);
+               }
+       } else if (rw == BTRFS_READ_LOCK) {
+               btrfs_assert_tree_read_locked(eb);
+               atomic_inc(&eb->blocking_readers);
+               WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+               atomic_dec(&eb->spinning_readers);
+               read_unlock(&eb->lock);
        }
-       /* exit with the spin lock released and the bit set */
+       return;
 }
 
 /*
- * clearing the blocking flag will take the spinlock again.
- * After this you can't safely schedule
+ * if we currently have a blocking lock, take the spinlock
+ * and drop our blocking count
  */
-void btrfs_clear_lock_blocking(struct extent_buffer *eb)
+void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
-               spin_nested(eb);
-               clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
-               smp_mb__after_clear_bit();
+       if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
+               BUG_ON(atomic_read(&eb->blocking_writers) != 1);
+               write_lock(&eb->lock);
+               WARN_ON(atomic_read(&eb->spinning_writers));
+               atomic_inc(&eb->spinning_writers);
+               if (atomic_dec_and_test(&eb->blocking_writers))
+                       wake_up(&eb->write_lock_wq);
+       } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
+               BUG_ON(atomic_read(&eb->blocking_readers) == 0);
+               read_lock(&eb->lock);
+               atomic_inc(&eb->spinning_readers);
+               if (atomic_dec_and_test(&eb->blocking_readers))
+                       wake_up(&eb->read_lock_wq);
        }
-       /* exit with the spin lock held */
+       return;
 }
 
 /*
- * unfortunately, many of the places that currently set a lock to blocking
- * don't end up blocking for very long, and often they don't block
- * at all.  For a dbench 50 run, if we don't spin on the blocking bit
- * at all, the context switch rate can jump up to 400,000/sec or more.
- *
- * So, we're still stuck with this crummy spin on the blocking bit,
- * at least until the most common causes of the short blocks
- * can be dealt with.
+ * take a spinning read lock.  This will wait for any blocking
+ * writers
  */
-static int btrfs_spin_on_block(struct extent_buffer *eb)
+void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
-       int i;
-
-       for (i = 0; i < 512; i++) {
-               if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-                       return 1;
-               if (need_resched())
-                       break;
-               cpu_relax();
+again:
+       wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+       read_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers)) {
+               read_unlock(&eb->lock);
+               wait_event(eb->write_lock_wq,
+                          atomic_read(&eb->blocking_writers) == 0);
+               goto again;
        }
-       return 0;
+       atomic_inc(&eb->read_locks);
+       atomic_inc(&eb->spinning_readers);
 }
 
 /*
- * This is somewhat different from trylock.  It will take the
- * spinlock but if it finds the lock is set to blocking, it will
- * return without the lock held.
- *
- * returns 1 if it was able to take the lock and zero otherwise
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
  */
-int btrfs_try_spin_lock(struct extent_buffer *eb)
+int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-       int i;
+       if (atomic_read(&eb->blocking_writers))
+               return 0;
 
-       if (btrfs_spin_on_block(eb)) {
-               spin_nested(eb);
-               if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-                       return 1;
-               spin_unlock(&eb->lock);
+       read_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers)) {
+               read_unlock(&eb->lock);
+               return 0;
        }
-       /* spin for a bit on the BLOCKING flag */
-       for (i = 0; i < 2; i++) {
-               cpu_relax();
-               if (!btrfs_spin_on_block(eb))
-                       break;
-
-               spin_nested(eb);
-               if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-                       return 1;
-               spin_unlock(&eb->lock);
-       }
-       return 0;
+       atomic_inc(&eb->read_locks);
+       atomic_inc(&eb->spinning_readers);
+       return 1;
 }
 
 /*
- * the autoremove wake function will return 0 if it tried to wake up
- * a process that was already awake, which means that process won't
- * count as an exclusive wakeup.  The waitq code will continue waking
- * procs until it finds one that was actually sleeping.
- *
- * For btrfs, this isn't quite what we want.  We want a single proc
- * to be notified that the lock is ready for taking.  If that proc
- * already happen to be awake, great, it will loop around and try for
- * the lock.
- *
- * So, btrfs_wake_function always returns 1, even when the proc that we
- * tried to wake up was already awake.
+ * returns 1 if we get the write lock and 0 if we don't
+ * this won't wait for blocking writers or readers
  */
-static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
-                              int sync, void *key)
+int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 {
-       autoremove_wake_function(wait, mode, sync, key);
+       if (atomic_read(&eb->blocking_writers) ||
+           atomic_read(&eb->blocking_readers))
+               return 0;
+       write_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers) ||
+           atomic_read(&eb->blocking_readers)) {
+               write_unlock(&eb->lock);
+               return 0;
+       }
+       atomic_inc(&eb->write_locks);
+       atomic_inc(&eb->spinning_writers);
        return 1;
 }
 
 /*
- * returns with the extent buffer spinlocked.
- *
- * This will spin and/or wait as required to take the lock, and then
- * return with the spinlock held.
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
+ * drop a spinning read lock
+ */
+void btrfs_tree_read_unlock(struct extent_buffer *eb)
+{
+       btrfs_assert_tree_read_locked(eb);
+       WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+       atomic_dec(&eb->spinning_readers);
+       atomic_dec(&eb->read_locks);
+       read_unlock(&eb->lock);
+}
+
+/*
+ * drop a blocking read lock
+ */
+void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
+{
+       btrfs_assert_tree_read_locked(eb);
+       WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+       if (atomic_dec_and_test(&eb->blocking_readers))
+               wake_up(&eb->read_lock_wq);
+       atomic_dec(&eb->read_locks);
+}
+
+/*
+ * take a spinning write lock.  This will wait for any
+ * blocking readers and writers
  */
 int btrfs_tree_lock(struct extent_buffer *eb)
 {
-       DEFINE_WAIT(wait);
-       wait.func = btrfs_wake_function;
-
-       if (!btrfs_spin_on_block(eb))
-               goto sleep;
-
-       while(1) {
-               spin_nested(eb);
-
-               /* nobody is blocking, exit with the spinlock held */
-               if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-                       return 0;
-
-               /*
-                * we have the spinlock, but the real owner is blocking.
-                * wait for them
-                */
-               spin_unlock(&eb->lock);
-
-               /*
-                * spin for a bit, and if the blocking flag goes away,
-                * loop around
-                */
-               cpu_relax();
-               if (btrfs_spin_on_block(eb))
-                       continue;
-sleep:
-               prepare_to_wait_exclusive(&eb->lock_wq, &wait,
-                                         TASK_UNINTERRUPTIBLE);
-
-               if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-                       schedule();
-
-               finish_wait(&eb->lock_wq, &wait);
+again:
+       wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
+       wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+       write_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_readers)) {
+               write_unlock(&eb->lock);
+               wait_event(eb->read_lock_wq,
+                          atomic_read(&eb->blocking_readers) == 0);
+               goto again;
        }
+       if (atomic_read(&eb->blocking_writers)) {
+               write_unlock(&eb->lock);
+               wait_event(eb->write_lock_wq,
+                          atomic_read(&eb->blocking_writers) == 0);
+               goto again;
+       }
+       WARN_ON(atomic_read(&eb->spinning_writers));
+       atomic_inc(&eb->spinning_writers);
+       atomic_inc(&eb->write_locks);
        return 0;
 }
 
+/*
+ * drop a spinning or a blocking write lock.
+ */
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-       /*
-        * if we were a blocking owner, we don't have the spinlock held
-        * just clear the bit and look for waiters
-        */
-       if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-               smp_mb__after_clear_bit();
-       else
-               spin_unlock(&eb->lock);
-
-       if (waitqueue_active(&eb->lock_wq))
-               wake_up(&eb->lock_wq);
+       int blockers = atomic_read(&eb->blocking_writers);
+
+       BUG_ON(blockers > 1);
+
+       btrfs_assert_tree_locked(eb);
+       atomic_dec(&eb->write_locks);
+
+       if (blockers) {
+               WARN_ON(atomic_read(&eb->spinning_writers));
+               atomic_dec(&eb->blocking_writers);
+               smp_wmb();
+               wake_up(&eb->write_lock_wq);
+       } else {
+               WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+               atomic_dec(&eb->spinning_writers);
+               write_unlock(&eb->lock);
+       }
        return 0;
 }
 
 void btrfs_assert_tree_locked(struct extent_buffer *eb)
 {
-       if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-               assert_spin_locked(&eb->lock);
+       BUG_ON(!atomic_read(&eb->write_locks));
+}
+
+void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+{
+       BUG_ON(!atomic_read(&eb->read_locks));
 }
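
Taken together, the read side of the new API follows the same spin-then-block shape as the write side; roughly (may_block is an illustrative condition):

        btrfs_tree_read_lock(eb);       /* spinning read lock; waits out
                                         * blocking writers first */
        if (may_block) {
                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                /* ... sleep-safe work against eb ... */
                btrfs_tree_read_unlock_blocking(eb);
        } else {
                /* ... short, non-sleeping work ... */
                btrfs_tree_read_unlock(eb);
        }
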
index 5c33a560a2f100c2454797d122875a76921403ab..17247ddb81a00f80e2e3e0a82c5c87e36cc564d1 100644 (file)
 #ifndef __BTRFS_LOCKING_
 #define __BTRFS_LOCKING_
 
+#define BTRFS_WRITE_LOCK 1
+#define BTRFS_READ_LOCK 2
+#define BTRFS_WRITE_LOCK_BLOCKING 3
+#define BTRFS_READ_LOCK_BLOCKING 4
+
 int btrfs_tree_lock(struct extent_buffer *eb);
 int btrfs_tree_unlock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
-void btrfs_set_lock_blocking(struct extent_buffer *eb);
-void btrfs_clear_lock_blocking(struct extent_buffer *eb);
+void btrfs_tree_read_lock(struct extent_buffer *eb);
+void btrfs_tree_read_unlock(struct extent_buffer *eb);
+void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
+void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
+int btrfs_try_tree_read_lock(struct extent_buffer *eb);
+int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+
+static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
+{
+       if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
+               btrfs_tree_unlock(eb);
+       else if (rw == BTRFS_READ_LOCK_BLOCKING)
+               btrfs_tree_read_unlock_blocking(eb);
+       else if (rw == BTRFS_READ_LOCK)
+               btrfs_tree_read_unlock(eb);
+       else
+               BUG();
+}
+
+static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
+{
+       btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
+}
+
+static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb)
+{
+       btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
+}
 #endif
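
The four lock-state constants let a caller record in one int which state it left a buffer in and release it generically through btrfs_tree_unlock_rw(); the tree search code elsewhere in this merge stores exactly these values in path->locks[level]. A hedged sketch (must_block is illustrative):

        int rw = BTRFS_READ_LOCK;

        btrfs_tree_read_lock(eb);
        if (must_block) {
                btrfs_set_lock_blocking_rw(eb, rw);
                rw = BTRFS_READ_LOCK_BLOCKING;
        }
        /* ... */
        btrfs_tree_unlock_rw(eb, rw);   /* dispatches on the saved state */
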
index 5e0a3dc79a453f3930e9c749c1cf08c63e5c7c6a..59bb1764273d476b7efe01a8d0948f4f473de6f2 100644 (file)
@@ -2955,7 +2955,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        page_cache_sync_readahead(inode->i_mapping,
                                                  ra, NULL, index,
                                                  last_index + 1 - index);
-                       page = grab_cache_page(inode->i_mapping, index);
+                       page = find_or_create_page(inode->i_mapping, index,
+                                                  GFP_NOFS);
                        if (!page) {
                                btrfs_delalloc_release_metadata(inode,
                                                        PAGE_CACHE_SIZE);
index c0f7ecaf1e79a931bbb2be480c97c3d848ec4654..bc1f6ad18442bc728a10e9919b91162af1b60b4d 100644 (file)
@@ -50,36 +50,22 @@ u##bits btrfs_##name(struct extent_buffer *eb,                              \
        unsigned long part_offset = (unsigned long)s;                   \
        unsigned long offset = part_offset + offsetof(type, member);    \
        type *p;                                                        \
-       /* ugly, but we want the fast path here */                      \
-       if (eb->map_token && offset >= eb->map_start &&                 \
-           offset + sizeof(((type *)0)->member) <= eb->map_start +     \
-           eb->map_len) {                                              \
-               p = (type *)(eb->kaddr + part_offset - eb->map_start);  \
-               return le##bits##_to_cpu(p->member);                    \
-       }                                                               \
-       {                                                               \
-               int err;                                                \
-               char *map_token;                                        \
-               char *kaddr;                                            \
-               int unmap_on_exit = (eb->map_token == NULL);            \
-               unsigned long map_start;                                \
-               unsigned long map_len;                                  \
-               u##bits res;                                            \
-               err = map_extent_buffer(eb, offset,                     \
-                               sizeof(((type *)0)->member),            \
-                               &map_token, &kaddr,                     \
-                               &map_start, &map_len, KM_USER1);        \
-               if (err) {                                              \
-                       __le##bits leres;                               \
-                       read_eb_member(eb, s, type, member, &leres);    \
-                       return le##bits##_to_cpu(leres);                \
-               }                                                       \
-               p = (type *)(kaddr + part_offset - map_start);          \
-               res = le##bits##_to_cpu(p->member);                     \
-               if (unmap_on_exit)                                      \
-                       unmap_extent_buffer(eb, map_token, KM_USER1);   \
-               return res;                                             \
-       }                                                               \
+       int err;                                                \
+       char *kaddr;                                            \
+       unsigned long map_start;                                \
+       unsigned long map_len;                                  \
+       u##bits res;                                            \
+       err = map_private_extent_buffer(eb, offset,             \
+                       sizeof(((type *)0)->member),            \
+                       &kaddr, &map_start, &map_len);          \
+       if (err) {                                              \
+               __le##bits leres;                               \
+               read_eb_member(eb, s, type, member, &leres);    \
+               return le##bits##_to_cpu(leres);                \
+       }                                                       \
+       p = (type *)(kaddr + part_offset - map_start);          \
+       res = le##bits##_to_cpu(p->member);                     \
+       return res;                                             \
 }                                                                      \
 void btrfs_set_##name(struct extent_buffer *eb,                                \
                                    type *s, u##bits val)               \
@@ -87,36 +73,21 @@ void btrfs_set_##name(struct extent_buffer *eb,                             \
        unsigned long part_offset = (unsigned long)s;                   \
        unsigned long offset = part_offset + offsetof(type, member);    \
        type *p;                                                        \
-       /* ugly, but we want the fast path here */                      \
-       if (eb->map_token && offset >= eb->map_start &&                 \
-           offset + sizeof(((type *)0)->member) <= eb->map_start +     \
-           eb->map_len) {                                              \
-               p = (type *)(eb->kaddr + part_offset - eb->map_start);  \
-               p->member = cpu_to_le##bits(val);                       \
-               return;                                                 \
-       }                                                               \
-       {                                                               \
-               int err;                                                \
-               char *map_token;                                        \
-               char *kaddr;                                            \
-               int unmap_on_exit = (eb->map_token == NULL);            \
-               unsigned long map_start;                                \
-               unsigned long map_len;                                  \
-               err = map_extent_buffer(eb, offset,                     \
-                               sizeof(((type *)0)->member),            \
-                               &map_token, &kaddr,                     \
-                               &map_start, &map_len, KM_USER1);        \
-               if (err) {                                              \
-                       __le##bits val2;                                \
-                       val2 = cpu_to_le##bits(val);                    \
-                       write_eb_member(eb, s, type, member, &val2);    \
-                       return;                                         \
-               }                                                       \
-               p = (type *)(kaddr + part_offset - map_start);          \
-               p->member = cpu_to_le##bits(val);                       \
-               if (unmap_on_exit)                                      \
-                       unmap_extent_buffer(eb, map_token, KM_USER1);   \
-       }                                                               \
+       int err;                                                \
+       char *kaddr;                                            \
+       unsigned long map_start;                                \
+       unsigned long map_len;                                  \
+       err = map_private_extent_buffer(eb, offset,             \
+                       sizeof(((type *)0)->member),            \
+                       &kaddr, &map_start, &map_len);          \
+       if (err) {                                              \
+               __le##bits val2;                                \
+               val2 = cpu_to_le##bits(val);                    \
+               write_eb_member(eb, s, type, member, &val2);    \
+               return;                                         \
+       }                                                       \
+       p = (type *)(kaddr + part_offset - map_start);          \
+       p->member = cpu_to_le##bits(val);                       \
 }
 
 #include "ctree.h"
@@ -125,15 +96,6 @@ void btrfs_node_key(struct extent_buffer *eb,
                    struct btrfs_disk_key *disk_key, int nr)
 {
        unsigned long ptr = btrfs_node_key_ptr_offset(nr);
-       if (eb->map_token && ptr >= eb->map_start &&
-           ptr + sizeof(*disk_key) <= eb->map_start + eb->map_len) {
-               memcpy(disk_key, eb->kaddr + ptr - eb->map_start,
-                       sizeof(*disk_key));
-               return;
-       } else if (eb->map_token) {
-               unmap_extent_buffer(eb, eb->map_token, KM_USER1);
-               eb->map_token = NULL;
-       }
        read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
                       struct btrfs_key_ptr, key, disk_key);
 }
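
The rewritten accessors all share one shape: attempt to map the member's
bytes directly with map_private_extent_buffer() and operate on them in
place, falling back to the slower read_eb_member()/write_eb_member() copy
when the mapping fails (for instance when the member straddles a mapping
boundary). A minimal sketch of that shape, with invented
map_region()/copy_region() helpers standing in for the btrfs ones:

#include <linux/types.h>
#include <asm/byteorder.h>

struct buf;
int map_region(struct buf *b, unsigned long offset, unsigned long len,
	       char **kaddr, unsigned long *start, unsigned long *maplen);
void copy_region(struct buf *b, unsigned long offset, void *dst,
		 unsigned long len);

/* Sketch only: try the in-place fast path, bounce-copy on failure. */
static u64 read_u64_member(struct buf *b, unsigned long offset)
{
	char *kaddr;
	unsigned long map_start, map_len;

	if (map_region(b, offset, sizeof(u64),
		       &kaddr, &map_start, &map_len)) {
		__le64 tmp;

		copy_region(b, offset, &tmp, sizeof(tmp)); /* safe path */
		return le64_to_cpu(tmp);
	}
	return le64_to_cpu(*(__le64 *)(kaddr + offset - map_start)); /* fast */
}
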
index 51dcec86757f071654bc3866123e65157ef0286b..eb55863bb4aee8a323783aa24536d17ec166f26d 100644 (file)
@@ -260,7 +260,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 {
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
-       int retries = 0;
+       u64 num_bytes = 0;
        int ret;
 
        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -274,6 +274,19 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                h->block_rsv = NULL;
                goto got_it;
        }
+
+       /*
+        * Do the reservation before we join the transaction so we can do all
+        * the appropriate flushing if need be.
+        */
+       if (num_items > 0 && root != root->fs_info->chunk_root) {
+               num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
+               ret = btrfs_block_rsv_add(NULL, root,
+                                         &root->fs_info->trans_block_rsv,
+                                         num_bytes);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
 again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
@@ -310,24 +323,9 @@ again:
                goto again;
        }
 
-       if (num_items > 0) {
-               ret = btrfs_trans_reserve_metadata(h, root, num_items);
-               if (ret == -EAGAIN && !retries) {
-                       retries++;
-                       btrfs_commit_transaction(h, root);
-                       goto again;
-               } else if (ret == -EAGAIN) {
-                       /*
-                        * We have already retried and got EAGAIN, so really we
-                        * don't have space, so set ret to -ENOSPC.
-                        */
-                       ret = -ENOSPC;
-               }
-
-               if (ret < 0) {
-                       btrfs_end_transaction(h, root);
-                       return ERR_PTR(ret);
-               }
+       if (num_bytes) {
+               h->block_rsv = &root->fs_info->trans_block_rsv;
+               h->bytes_reserved = num_bytes;
        }
 
 got_it:
@@ -499,10 +497,17 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
        }
 
        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
-               if (throttle)
+               if (throttle) {
+                       /*
+                        * We may race with somebody else here and end up having
+                        * to call end_transaction on ourselves again, so bump
+                        * our use_count.
+                        */
+                       trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
-               else
+               } else {
                        wake_up_process(info->transaction_kthread);
+               }
        }
 
        WARN_ON(cur_trans != info->running_transaction);
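
Taken together, the start_transaction() hunks move the metadata reservation
ahead of the join: reserve (flushing if that is what it takes) while nothing
is held, then join and record the reservation on the handle for release at
end-transaction time; the old reserve-inside-the-transaction retry on
-EAGAIN disappears. Schematically, with invented names rather than the real
btrfs ones:

#include <linux/err.h>
#include <linux/types.h>

struct ctx;
struct handle;
u64 calc_metadata_size(struct ctx *c, int num_items);	/* invented */
int reserve_space(struct ctx *c, u64 bytes);		/* invented */
struct handle *join(struct ctx *c, u64 bytes);		/* invented */

/* Sketch: reserve first so any flushing happens before we are joined. */
static struct handle *start(struct ctx *c, int num_items)
{
	u64 bytes = 0;
	int ret;

	if (num_items > 0) {
		bytes = calc_metadata_size(c, num_items);
		ret = reserve_space(c, bytes);	/* may flush and block */
		if (ret)
			return ERR_PTR(ret);	/* nothing yet to unwind */
	}
	return join(c, bytes);	/* handle remembers bytes for release */
}
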
index f3cacc079102738ee6769528d90f5412c44fd6dd..babee65f8edaf779e26bc134ad7d02e8541ff8f2 100644 (file)
@@ -1733,8 +1733,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                                btrfs_read_buffer(next, ptr_gen);
 
                                btrfs_tree_lock(next);
-                               clean_tree_block(trans, root, next);
                                btrfs_set_lock_blocking(next);
+                               clean_tree_block(trans, root, next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);
 
@@ -1802,8 +1802,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                next = path->nodes[*level];
 
                                btrfs_tree_lock(next);
-                               clean_tree_block(trans, root, next);
                                btrfs_set_lock_blocking(next);
+                               clean_tree_block(trans, root, next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);
 
@@ -1870,8 +1870,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
                        next = path->nodes[orig_level];
 
                        btrfs_tree_lock(next);
-                       clean_tree_block(trans, log, next);
                        btrfs_set_lock_blocking(next);
+                       clean_tree_block(trans, log, next);
                        btrfs_wait_tree_block_writeback(next);
                        btrfs_tree_unlock(next);
 
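All three hunks in this file make the same one-line reorder: the tree lock
is switched to its blocking form before clean_tree_block() runs, presumably
so that any sleeping done while cleaning happens under a blocking lock
rather than a spinning one. The resulting order, taken from the hunks above:

btrfs_tree_lock(next);			/* take the tree lock */
btrfs_set_lock_blocking(next);		/* make it blocking first */
clean_tree_block(trans, root, next);	/* may sleep: now safe */
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
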
index 90d956c17d92f0868c7d8f1083448d83675bbba2..53875ae73ad4f634548e8d1d315056822c9cbd4b 100644 (file)
@@ -3599,7 +3599,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
-       btrfs_set_buffer_lockdep_class(sb, 0);
+       btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
 
        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);
index 5366fe452ab07db7402402e96351c1b956809e20..d733b9cfea343207e71bdb6d8d8b51323717c800 100644 (file)
@@ -102,43 +102,57 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       /* first lets see if we already have this xattr */
-       di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
-                               strlen(name), -1);
-       if (IS_ERR(di)) {
-               ret = PTR_ERR(di);
-               goto out;
-       }
-
-       /* ok we already have this xattr, lets remove it */
-       if (di) {
-               /* if we want create only exit */
-               if (flags & XATTR_CREATE) {
-                       ret = -EEXIST;
+       if (flags & XATTR_REPLACE) {
+               di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
+                                       name_len, -1);
+               if (IS_ERR(di)) {
+                       ret = PTR_ERR(di);
+                       goto out;
+               } else if (!di) {
+                       ret = -ENODATA;
                        goto out;
                }
-
                ret = btrfs_delete_one_dir_name(trans, root, path, di);
-               BUG_ON(ret);
+               if (ret)
+                       goto out;
                btrfs_release_path(path);
+       }
 
-               /* if we don't have a value then we are removing the xattr */
-               if (!value)
+again:
+       ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
+                                     name, name_len, value, size);
+       if (ret == -EEXIST) {
+               if (flags & XATTR_CREATE)
                        goto out;
-       } else {
+               /*
+                * We can't use the path we already have since we won't have the
+                * proper locking for a delete, so release the path and
+                * re-lookup in order to delete the existing xattr.
+                */
                btrfs_release_path(path);
+               di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
+                                       name, name_len, -1);
+               if (IS_ERR(di)) {
+                       ret = PTR_ERR(di);
+                       goto out;
+               } else if (!di) {
+                       /* Shouldn't happen but just in case... */
+                       btrfs_release_path(path);
+                       goto again;
+               }
 
-               if (flags & XATTR_REPLACE) {
-                       /* we couldn't find the attr to replace */
-                       ret = -ENODATA;
+               ret = btrfs_delete_one_dir_name(trans, root, path, di);
+               if (ret)
                        goto out;
+
+               /*
+                * We have a value to set, so go back and try to insert it now.
+                */
+               if (value) {
+                       btrfs_release_path(path);
+                       goto again;
                }
        }
-
-       /* ok we have to create a completely new xattr */
-       ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
-                                     name, name_len, value, size);
-       BUG_ON(ret);
 out:
        btrfs_free_path(path);
        return ret;
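
The rewritten do_setxattr() replaces the old lookup-then-insert sequence
with an optimistic insert: try btrfs_insert_xattr_item() first, and only on
-EEXIST release the path, delete the existing item, and retry. The idiom in
isolation, as a sketch in the kernel's negative-errno convention
(insert_item()/delete_item() are invented):

#include <linux/errno.h>
#include <linux/types.h>

struct store;
int insert_item(struct store *s, const char *name, const void *val, size_t len);
int delete_item(struct store *s, const char *name);

/* Sketch: optimistic insert with delete-and-retry on collision. */
static int set_key(struct store *s, const char *name,
		   const void *val, size_t len, int create_only)
{
	int ret;
again:
	ret = insert_item(s, name, val, len);
	if (ret == -EEXIST) {
		if (create_only)
			return -EEXIST;		/* XATTR_CREATE semantics */
		ret = delete_item(s, name);
		if (ret)
			return ret;
		goto again;			/* slot is free now; re-insert */
	}
	return ret;
}
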
index 79743d146be69ec8ce3f279a032e1a5459eb462c..0c1d91756528969d409b7f4480b1653fc508fd5c 100644 (file)
@@ -1438,12 +1438,15 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
        struct dentry *temp;
        char *path;
        int len, pos;
+       unsigned seq;
 
        if (dentry == NULL)
                return ERR_PTR(-EINVAL);
 
 retry:
        len = 0;
+       seq = read_seqbegin(&rename_lock);
+       rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp);) {
                struct inode *inode = temp->d_inode;
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
@@ -1455,10 +1458,12 @@ retry:
                        len += 1 + temp->d_name.len;
                temp = temp->d_parent;
                if (temp == NULL) {
+                       rcu_read_unlock();
                        pr_err("build_path corrupt dentry %p\n", dentry);
                        return ERR_PTR(-EINVAL);
                }
        }
+       rcu_read_unlock();
        if (len)
                len--;  /* no leading '/' */
 
@@ -1467,9 +1472,12 @@ retry:
                return ERR_PTR(-ENOMEM);
        pos = len;
        path[pos] = 0;  /* trailing null */
+       rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
-               struct inode *inode = temp->d_inode;
+               struct inode *inode;
 
+               spin_lock(&temp->d_lock);
+               inode = temp->d_inode;
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
                        dout("build_path path+%d: %p SNAPDIR\n",
                             pos, temp);
@@ -1478,21 +1486,26 @@ retry:
                        break;
                } else {
                        pos -= temp->d_name.len;
-                       if (pos < 0)
+                       if (pos < 0) {
+                               spin_unlock(&temp->d_lock);
                                break;
+                       }
                        strncpy(path + pos, temp->d_name.name,
                                temp->d_name.len);
                }
+               spin_unlock(&temp->d_lock);
                if (pos)
                        path[--pos] = '/';
                temp = temp->d_parent;
                if (temp == NULL) {
+                       rcu_read_unlock();
                        pr_err("build_path corrupt dentry\n");
                        kfree(path);
                        return ERR_PTR(-EINVAL);
                }
        }
-       if (pos != 0) {
+       rcu_read_unlock();
+       if (pos != 0 || read_seqretry(&rename_lock, seq)) {
                pr_err("build_path did not end path lookup where "
                       "expected, namelen is %d, pos is %d\n", len, pos);
                /* presumably this is only possible if racing with a
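
Both passes over the dentry chain are now bracketed by the standard
anti-rename recipe: sample rename_lock with read_seqbegin(), hold
rcu_read_lock() across the ->d_parent walk, take each dentry's d_lock only
while its name is being read, and restart if read_seqretry() reports a
concurrent rename. The same skeleton reappears in cifs's
build_path_from_dentry() further down. Stripped to its bones, as a sketch:

#include <linux/dcache.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>

/* Sketch: walk dentry->d_parent safely against concurrent renames. */
static void walk_ancestors(struct dentry *dentry)
{
	struct dentry *temp;
	unsigned seq;
retry:
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp); temp = temp->d_parent) {
		spin_lock(&temp->d_lock);
		/* temp->d_name is stable here; copy what you need */
		spin_unlock(&temp->d_lock);
	}
	rcu_read_unlock();
	if (read_seqretry(&rename_lock, seq))
		goto retry;	/* lost a race with a rename: redo */
}
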
index 3e2989976297bf654649055f67ca2007e3d71733..bc4b12ca537bfaf1a6556cf0b1df5a4bc8af25ac 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/namei.h>
 #include <net/ipv6.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -542,14 +543,12 @@ static const struct super_operations cifs_super_ops = {
 static struct dentry *
 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 {
-       int xid, rc;
-       struct inode *inode;
-       struct qstr name;
-       struct dentry *dparent = NULL, *dchild = NULL, *alias;
+       struct dentry *dentry;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-       unsigned int i, full_len, len;
-       char *full_path = NULL, *pstart;
+       char *full_path = NULL;
+       char *s, *p;
        char sep;
+       int xid;
 
        full_path = cifs_build_path_to_root(vol, cifs_sb,
                                            cifs_sb_master_tcon(cifs_sb));
@@ -560,73 +559,32 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 
        xid = GetXid();
        sep = CIFS_DIR_SEP(cifs_sb);
-       dparent = dget(sb->s_root);
-       full_len = strlen(full_path);
-       full_path[full_len] = sep;
-       pstart = full_path + 1;
-
-       for (i = 1, len = 0; i <= full_len; i++) {
-               if (full_path[i] != sep || !len) {
-                       len++;
-                       continue;
-               }
-
-               full_path[i] = 0;
-               cFYI(1, "get dentry for %s", pstart);
-
-               name.name = pstart;
-               name.len = len;
-               name.hash = full_name_hash(pstart, len);
-               dchild = d_lookup(dparent, &name);
-               if (dchild == NULL) {
-                       cFYI(1, "not exists");
-                       dchild = d_alloc(dparent, &name);
-                       if (dchild == NULL) {
-                               dput(dparent);
-                               dparent = ERR_PTR(-ENOMEM);
-                               goto out;
-                       }
-               }
-
-               cFYI(1, "get inode");
-               if (dchild->d_inode == NULL) {
-                       cFYI(1, "not exists");
-                       inode = NULL;
-                       if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
-                               rc = cifs_get_inode_info_unix(&inode, full_path,
-                                                             sb, xid);
-                       else
-                               rc = cifs_get_inode_info(&inode, full_path,
-                                                        NULL, sb, xid, NULL);
-                       if (rc) {
-                               dput(dchild);
-                               dput(dparent);
-                               dparent = ERR_PTR(rc);
-                               goto out;
-                       }
-                       alias = d_materialise_unique(dchild, inode);
-                       if (alias != NULL) {
-                               dput(dchild);
-                               if (IS_ERR(alias)) {
-                                       dput(dparent);
-                                       dparent = ERR_PTR(-EINVAL); /* XXX */
-                                       goto out;
-                               }
-                               dchild = alias;
-                       }
-               }
-               cFYI(1, "parent %p, child %p", dparent, dchild);
-
-               dput(dparent);
-               dparent = dchild;
-               len = 0;
-               pstart = full_path + i + 1;
-               full_path[i] = sep;
-       }
-out:
+       dentry = dget(sb->s_root);
+       p = s = full_path;
+
+       do {
+               struct inode *dir = dentry->d_inode;
+               struct dentry *child;
+
+               /* skip separators */
+               while (*s == sep)
+                       s++;
+               if (!*s)
+                       break;
+               p = s++;
+               /* next separator */
+               while (*s && *s != sep)
+                       s++;
+
+               mutex_lock(&dir->i_mutex);
+               child = lookup_one_len(p, dentry, s - p);
+               mutex_unlock(&dir->i_mutex);
+               dput(dentry);
+               dentry = child;
+       } while (!IS_ERR(dentry));
        _FreeXid(xid);
        kfree(full_path);
-       return dparent;
+       return dentry;
 }
 
 static int cifs_set_super(struct super_block *sb, void *data)
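
The loop that replaces the hand-rolled d_lookup()/d_alloc() walk is the
generic one-component-at-a-time descent: skip separators, delimit the next
component, and hand it to lookup_one_len() under the parent's i_mutex;
lookup_one_len() returns either a child dentry or an ERR_PTR that
terminates the loop. The skeleton, separated from the cifs specifics
(and assuming, as the code above does, that every intermediate component
exists):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/namei.h>

/* Sketch: descend from a root dentry one path component at a time. */
static struct dentry *walk_path(struct dentry *root, char *path, char sep)
{
	struct dentry *dentry = dget(root);
	char *s = path, *p;

	do {
		struct dentry *child;

		while (*s == sep)		/* skip separators */
			s++;
		if (!*s)
			break;			/* ran out of components */
		p = s++;
		while (*s && *s != sep)		/* find component end */
			s++;

		mutex_lock(&dentry->d_inode->i_mutex);
		child = lookup_one_len(p, dentry, s - p);
		mutex_unlock(&dentry->d_inode->i_mutex);
		dput(dentry);
		dentry = child;			/* may be an ERR_PTR */
	} while (!IS_ERR(dentry));
	return dentry;
}
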
index 0900e1658c967de0fc2f4d70cc7645c9363a5d30..036ca83e5f461c2ff3e807ded5b7b797e5b6f836 100644 (file)
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "1.73"
+#define CIFS_VERSION   "1.74"
 #endif                         /* _CIFSFS_H */
index dbd669cc5bc7623ea93fafcff8d0448d55a92653..ccc1afa0bf3b697eaccc92351884042d549b7ca4 100644 (file)
@@ -3485,7 +3485,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
                goto out;
        }
 
-       snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid);
+       snprintf(username, sizeof(username), "krb50x%x", fsuid);
        vol_info->username = username;
        vol_info->local_nls = cifs_sb->local_nls;
        vol_info->linux_uid = fsuid;
index 81914df47ef1612c1ab742d228503bbf5e598046..fa8c21d913bc5b212191d2c444cb5ce74e1d4058 100644 (file)
@@ -55,6 +55,7 @@ build_path_from_dentry(struct dentry *direntry)
        char dirsep;
        struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
        struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       unsigned seq;
 
        if (direntry == NULL)
                return NULL;  /* not much we can do if dentry is freed and
@@ -68,22 +69,29 @@ build_path_from_dentry(struct dentry *direntry)
                dfsplen = 0;
 cifs_bp_rename_retry:
        namelen = dfsplen;
+       seq = read_seqbegin(&rename_lock);
+       rcu_read_lock();
        for (temp = direntry; !IS_ROOT(temp);) {
                namelen += (1 + temp->d_name.len);
                temp = temp->d_parent;
                if (temp == NULL) {
                        cERROR(1, "corrupt dentry");
+                       rcu_read_unlock();
                        return NULL;
                }
        }
+       rcu_read_unlock();
 
        full_path = kmalloc(namelen+1, GFP_KERNEL);
        if (full_path == NULL)
                return full_path;
        full_path[namelen] = 0; /* trailing null */
+       rcu_read_lock();
        for (temp = direntry; !IS_ROOT(temp);) {
+               spin_lock(&temp->d_lock);
                namelen -= 1 + temp->d_name.len;
                if (namelen < 0) {
+                       spin_unlock(&temp->d_lock);
                        break;
                } else {
                        full_path[namelen] = dirsep;
@@ -91,14 +99,17 @@ cifs_bp_rename_retry:
                                temp->d_name.len);
                        cFYI(0, "name: %s", full_path + namelen);
                }
+               spin_unlock(&temp->d_lock);
                temp = temp->d_parent;
                if (temp == NULL) {
                        cERROR(1, "corrupt dentry");
+                       rcu_read_unlock();
                        kfree(full_path);
                        return NULL;
                }
        }
-       if (namelen != dfsplen) {
+       rcu_read_unlock();
+       if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
                cERROR(1, "did not end path lookup where expected namelen is %d",
                        namelen);
                /* presumably this is only possible if racing with a rename
index bb71471a4d9d68516269550d6b5eeb5603d5a46b..a9b4a24f2a16ba0b4f9073e98043d51bf1f43e67 100644 (file)
@@ -1737,7 +1737,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = *poffset;
-                       io_parms.length = len;
+                       io_parms.length = cur_len;
                        rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &read_data, &buf_type);
                        pSMBr = (struct smb_com_read_rsp *)read_data;
index 3892ab817a36407975d6738a1b20d7c90c846574..d3e619692ee0f0437e26d9d91d61545859f248bc 100644 (file)
@@ -428,8 +428,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                flags |= NTLMSSP_NEGOTIATE_SIGN;
                if (!ses->server->session_estab)
-                       flags |= NTLMSSP_NEGOTIATE_KEY_XCH |
-                               NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+                       flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
        }
 
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -465,10 +464,11 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
        if (ses->server->sec_mode &
-          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
-               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+               if (!ses->server->session_estab)
+                       flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+       }
 
        tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
index e141939080f0d6db65c525305cab5a74b9dd552c..739fb59bcdc25123f3bdc620246045ddabdac34d 100644 (file)
@@ -37,7 +37,7 @@ static DEFINE_MUTEX(read_mutex);
 /* These macros may change in future, to provide better st_ino semantics. */
 #define OFFSET(x)      ((x)->i_ino)
 
-static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
+static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
 {
        if (!cino->offset)
                return offset + 1;
@@ -61,7 +61,7 @@ static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
 }
 
 static struct inode *get_cramfs_inode(struct super_block *sb,
-       struct cramfs_inode *cramfs_inode, unsigned int offset)
+       const struct cramfs_inode *cramfs_inode, unsigned int offset)
 {
        struct inode *inode;
        static struct timespec zerotime;
@@ -317,7 +317,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
        /* Set it all up.. */
        sb->s_op = &cramfs_ops;
        root = get_cramfs_inode(sb, &super.root, 0);
-       if (!root)
+       if (IS_ERR(root))
                goto out;
        sb->s_root = d_alloc_root(root);
        if (!sb->s_root) {
@@ -423,6 +423,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
        unsigned int offset = 0;
+       struct inode *inode = NULL;
        int sorted;
 
        mutex_lock(&read_mutex);
@@ -449,8 +450,8 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 
                for (;;) {
                        if (!namelen) {
-                               mutex_unlock(&read_mutex);
-                               return ERR_PTR(-EIO);
+                               inode = ERR_PTR(-EIO);
+                               goto out;
                        }
                        if (name[namelen-1])
                                break;
@@ -462,17 +463,18 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
                if (retval > 0)
                        continue;
                if (!retval) {
-                       struct cramfs_inode entry = *de;
-                       mutex_unlock(&read_mutex);
-                       d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
-                       return NULL;
+                       inode = get_cramfs_inode(dir->i_sb, de, dir_off);
+                       break;
                }
                /* else (retval < 0) */
                if (sorted)
                        break;
        }
+out:
        mutex_unlock(&read_mutex);
-       d_add(dentry, NULL);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
+       d_add(dentry, inode);
        return NULL;
 }
 
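With get_cramfs_inode() reporting failures through the returned pointer,
cramfs_lookup() must now distinguish three outcomes: a real inode, NULL for
a negative entry, and an ERR_PTR carrying an errno. That is the standard
ERR_PTR idiom; in outline (a sketch, not the cramfs code):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

/* Sketch of the ERR_PTR conventions in play above. */
static struct inode *maybe_get_inode(struct super_block *sb, int fail)
{
	if (fail)
		return ERR_PTR(-EIO);	/* errno encoded in the pointer */
	return NULL;			/* "not found" stays NULL */
}

static struct dentry *lookup_sketch(struct inode *dir,
				    struct dentry *dentry)
{
	struct inode *inode = maybe_get_inode(dir->i_sb, 0);

	if (IS_ERR(inode))
		return ERR_CAST(inode);	/* re-type the encoded errno */
	d_add(dentry, inode);		/* NULL inode => negative dentry */
	return NULL;
}
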
index 37f72ee5bf7c9577f164ce4a472d7d4d8f0c4aaa..fbdcbca40725ab483f9b238c561c6dfb240db0bf 100644 (file)
@@ -1813,8 +1813,6 @@ seqretry:
                tname = dentry->d_name.name;
                i = dentry->d_inode;
                prefetch(tname);
-               if (i)
-                       prefetch(i);
                /*
                 * This seqcount check is required to ensure name and
                 * len are loaded atomically, so as not to walk off the
@@ -2213,14 +2211,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
  * The hash value has to match the hash queue that the dentry is on..
  */
 /*
- * d_move - move a dentry
+ * __d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way.  Caller must hold
+ * rename_lock.
  */
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
 {
        if (!dentry->d_inode)
                printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2228,8 +2227,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
        BUG_ON(d_ancestor(dentry, target));
        BUG_ON(d_ancestor(target, dentry));
 
-       write_seqlock(&rename_lock);
-
        dentry_lock_for_move(dentry, target);
 
        write_seqcount_begin(&dentry->d_seq);
@@ -2275,6 +2272,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
        spin_unlock(&target->d_lock);
        fsnotify_d_move(dentry);
        spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+       write_seqlock(&rename_lock);
+       __d_move(dentry, target);
        write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
@@ -2302,7 +2313,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2317,11 +2328,6 @@ static struct dentry *__d_unalias(struct inode *inode,
        if (alias->d_parent == dentry->d_parent)
                goto out_unalias;
 
-       /* Check for loops */
-       ret = ERR_PTR(-ELOOP);
-       if (d_ancestor(alias, dentry))
-               goto out_err;
-
        /* See lock_rename() */
        ret = ERR_PTR(-EBUSY);
        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2331,7 +2337,7 @@ static struct dentry *__d_unalias(struct inode *inode,
                goto out_err;
        m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-       d_move(alias, dentry);
+       __d_move(alias, dentry);
        ret = alias;
 out_err:
        spin_unlock(&inode->i_lock);
@@ -2416,15 +2422,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                alias = __d_find_alias(inode, 0);
                if (alias) {
                        actual = alias;
-                       /* Is this an anonymous mountpoint that we could splice
-                        * into our tree? */
-                       if (IS_ROOT(alias)) {
+                       write_seqlock(&rename_lock);
+
+                       if (d_ancestor(alias, dentry)) {
+                               /* Check for loops */
+                               actual = ERR_PTR(-ELOOP);
+                       } else if (IS_ROOT(alias)) {
+                               /* Is this an anonymous mountpoint that we
+                                * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
+                               write_sequnlock(&rename_lock);
                                __d_drop(alias);
                                goto found;
+                       } else {
+                               /* Nope, but we must(!) avoid directory
+                                * aliasing */
+                               actual = __d_unalias(inode, dentry, alias);
                        }
-                       /* Nope, but we must(!) avoid directory aliasing */
-                       actual = __d_unalias(inode, dentry, alias);
+                       write_sequnlock(&rename_lock);
                        if (IS_ERR(actual))
                                dput(alias);
                        goto out_nolock;
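
The d_move()/__d_move() split is the usual lock-hoisting refactor: the body
becomes a static __d_move() whose callers must already hold rename_lock,
and the exported d_move() shrinks to a wrapper that takes the write side of
the seqlock. That is what lets d_materialise_unique() above keep the loop
check, the splice, and __d_unalias() under a single write_seqlock(). The
wrapper shape in general (names here are generic, not the dcache ones):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(state_lock);	/* dcache uses rename_lock */

static void __do_op(void)
{
	/* ...the real work; caller must hold state_lock... */
}

void do_op(void)			/* public entry point */
{
	write_seqlock(&state_lock);
	__do_op();
	write_sequnlock(&state_lock);
}
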
index 06065bd37fc339070948a141cd8063c9d39af8ad..c57beddcc217e3e3592fe977f8237f7f46ddf16f 100644 (file)
@@ -913,7 +913,7 @@ struct dentry *exofs_get_parent(struct dentry *child)
        unsigned long ino = exofs_parent_ino(child);
 
        if (!ino)
-               return NULL;
+               return ERR_PTR(-ESTALE);
 
        return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
 }
index 2f343b4d7a7d87b6792aad379b91ebffee9e15bc..3f7a59bfa7ada5cf1f8da1d7a391c36322004fba 100644 (file)
@@ -976,16 +976,12 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 
        pagevec_init(&pvec, 0);
        next = 0;
-       while (next <= (loff_t)-1 &&
-              pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
-              ) {
+       do {
+               if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+                       break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
-                       pgoff_t page_index = page->index;
-
-                       ASSERTCMP(page_index, >=, next);
-                       next = page_index + 1;
-
+                       next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
@@ -993,7 +989,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                }
                pagevec_release(&pvec);
                cond_resched();
-       }
+       } while (++next);
 
        _leave("");
 }
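
The rewritten loop drops the per-page assertions and simply records the
last index it saw: pagevec_lookup() returning zero is the normal exit, and
the while (++next) condition only trips when next was already the maximum
pgoff_t, guarding against index wrap-around. As a standalone skeleton:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Sketch: visit every cached page of a mapping in index order. */
static void for_each_cached_page(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;			/* no pages left */
		for (i = 0; i < pagevec_count(&pvec); i++) {
			next = pvec.pages[i]->index;
			/* ...per-page work on pvec.pages[i]... */
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);	/* 0 only if next wrapped at the top */
}
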
index 802ac5eeba28e2c77c8618ba457a179fa8babb73..f9fbbe96c222860374840a825f2168d14b340f83 100644 (file)
@@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                return 0;
 
        gfs2_log_lock(sdp);
+       spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
@@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                        goto not_possible;
                bh = bh->b_this_page;
        } while(bh != head);
+       spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
 
        head = bh = page_buffers(page);
@@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
 cannot_release:
+       spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
 }
index 8ef70f464731eec2e1a707b0bd1dcd12d24aa2f1..2cca29316bd624b30802550931e569eacc65b7ac 100644 (file)
@@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl)
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                gfs2_remove_from_ail(bd);
-               spin_unlock(&sdp->sd_ail_lock);
-
                bd->bd_bh = NULL;
                bh->b_private = NULL;
+               spin_unlock(&sdp->sd_ail_lock);
+
                bd->bd_blkno = bh->b_blocknr;
                gfs2_log_lock(sdp);
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));
@@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
                }
        }
 
-       if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+       if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+               gfs2_log_flush(gl->gl_sbd, NULL);
                gl->gl_sbd->sd_rindex_uptodate = 0;
+       }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
 }
index 0a064e91ac7071e6f5570acf50666aaf59b59bc2..81206e70cbf69485d19d27a7ae7515cc6b158586 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/buffer_head.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist_bl.h>
+#include <linux/completion.h>
 
 #define DIO_WAIT       0x00000010
 #define DIO_METADATA   0x00000020
@@ -546,6 +547,7 @@ struct gfs2_sbd {
        struct gfs2_glock *sd_trans_gl;
        wait_queue_head_t sd_glock_wait;
        atomic_t sd_glock_disposal;
+       struct completion sd_locking_init;
 
        /* Inode Stuff */
 
index 903115f2bb34849d8df402d57723bec403857204..85c62923ee292d9d663119854f9115ed26fff36b 100644 (file)
@@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
                if (gfs2_ail1_empty(sdp))
                        break;
        }
+       gfs2_log_flush(sdp, NULL);
 }
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
index 8ac9ae189b535cfe91fa8922fdf091e8057766f2..2a77071fb7b68df78c3ff41041bf1d7fb330985c 100644 (file)
@@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
        init_waitqueue_head(&sdp->sd_glock_wait);
        atomic_set(&sdp->sd_glock_disposal, 0);
+       init_completion(&sdp->sd_locking_init);
        spin_lock_init(&sdp->sd_statfs_spin);
 
        spin_lock_init(&sdp->sd_rindex_spin);
@@ -1017,11 +1018,13 @@ hostdata_error:
                fsname++;
        if (lm->lm_mount == NULL) {
                fs_info(sdp, "Now mounting FS...\n");
+               complete(&sdp->sd_locking_init);
                return 0;
        }
        ret = lm->lm_mount(sdp, fsname);
        if (ret == 0)
                fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+       complete(&sdp->sd_locking_init);
        return ret;
 }
 
index ed540e7018beee30c058b185d6599cfff30dd9ba..fb0edf735483174932e569ab2c318311d88cb6e2 100644 (file)
@@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        struct timespec atime;
        struct gfs2_dinode *di;
        int ret = -EAGAIN;
+       int unlock_required = 0;
 
        /* Skip timestamp update, if this is from a memalloc */
        if (current->flags & PF_MEMALLOC)
                goto do_flush;
-       ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-       if (ret)
-               goto do_flush;
+       if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+               ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+               if (ret)
+                       goto do_flush;
+               unlock_required = 1;
+       }
        ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (ret)
                goto do_unlock;
@@ -780,7 +784,8 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        }
        gfs2_trans_end(sdp);
 do_unlock:
-       gfs2_glock_dq_uninit(&gh);
+       if (unlock_required)
+               gfs2_glock_dq_uninit(&gh);
 do_flush:
        if (wbc->sync_mode == WB_SYNC_ALL)
                gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
@@ -1427,7 +1432,20 @@ out:
        return error;
 }
 
-/*
+/**
+ * gfs2_evict_inode - Remove an inode from cache
+ * @inode: The inode to evict
+ *
+ * There are three cases to consider:
+ * 1. i_nlink == 0, we are the final opener (and must deallocate)
+ * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
+ * 3. i_nlink > 0
+ *
+ * If the fs is read only, then we have to treat all cases as per #3
+ * since we are unable to do any deallocation. The inode will be
+ * deallocated by the next read/write node to attempt an allocation
+ * in the same resource group.
+ *
  * We have to (at the moment) hold the inodes main lock to cover
  * the gap between unlocking the shared lock on the iopen lock and
  * taking the exclusive lock. I'd rather do a shared -> exclusive
@@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode)
        if (error)
                goto out_truncate;
 
+       /* Case 1 starts here */
+
        if (S_ISDIR(inode->i_mode) &&
            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
@@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode)
        goto out_unlock;
 
 out_truncate:
+       /* Case 2 starts here */
        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_unlock;
-       gfs2_final_release_pages(ip);
+       /* Needs to be done before glock release & also in a transaction */
+       truncate_inode_pages(&inode->i_data, 0);
        gfs2_trans_end(sdp);
 
 out_unlock:
+       /* Error path for case 1 */
        if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
                gfs2_glock_dq(&ip->i_iopen_gh);
        gfs2_holder_uninit(&ip->i_iopen_gh);
@@ -1507,6 +1530,7 @@ out_unlock:
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
 out:
+       /* Case 3 starts here */
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
 
index e20eab37bc80c3fa6629c38dd79c1b9dc6491c2f..443cabcfcd23f834f64bc800c6c4cd518a0e58db 100644 (file)
@@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        rv = sscanf(buf, "%u", &first);
        if (rv != 1 || first > 1)
                return -EINVAL;
+       rv = wait_for_completion_killable(&sdp->sd_locking_init);
+       if (rv)
+               return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
@@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
-
+       rv = wait_for_completion_killable(&sdp->sd_locking_init);
+       if (rv)
+               return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
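
The gfs2 hunks across incore.h, ops_fstype.c and this file cooperate:
init_sbd() initialises sd_locking_init, gfs2_lm_mount() completes it once
the lock protocol decision is final, and the lkfirst/jid store handlers
wait (killably) before touching journal state, closing the window where
userspace could write them too early. The completion pattern in isolation,
as a generic sketch:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_COMPLETION(setup_done);	/* gfs2 embeds this in its sbd */

static void finish_setup(void)
{
	/* ...make the configuration decision... */
	complete(&setup_done);	/* one waiter; complete_all() if several */
}

static ssize_t store_sketch(const char *buf, size_t len)
{
	int rv = wait_for_completion_killable(&setup_done);

	if (rv)
		return rv;	/* fatal signal while waiting */
	/* ...now safe to act on post-setup state... */
	return len;
}
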
index 87ed48e0343d701fe621fa1751390bfdfbec06f7..85c098a499f33ce858bdfaf85f76f053bd1b9376 100644 (file)
@@ -139,7 +139,8 @@ static int file_removed(struct dentry *dentry, const char *file)
 static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
                                   struct nameidata *nd)
 {
-       struct dentry *proc_dentry, *new, *parent;
+       struct dentry *proc_dentry, *parent;
+       struct qstr *name = &dentry->d_name;
        struct inode *inode;
        int err, deleted;
 
@@ -149,23 +150,9 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
        else if (deleted)
                return ERR_PTR(-ENOENT);
 
-       err = -ENOMEM;
        parent = HPPFS_I(ino)->proc_dentry;
        mutex_lock(&parent->d_inode->i_mutex);
-       proc_dentry = d_lookup(parent, &dentry->d_name);
-       if (proc_dentry == NULL) {
-               proc_dentry = d_alloc(parent, &dentry->d_name);
-               if (proc_dentry == NULL) {
-                       mutex_unlock(&parent->d_inode->i_mutex);
-                       goto out;
-               }
-               new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
-                                                      proc_dentry, NULL);
-               if (new) {
-                       dput(proc_dentry);
-                       proc_dentry = new;
-               }
-       }
+       proc_dentry = lookup_one_len(name->name, parent, name->len);
        mutex_unlock(&parent->d_inode->i_mutex);
 
        if (IS_ERR(proc_dentry))
@@ -174,13 +161,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
        err = -ENOMEM;
        inode = get_inode(ino->i_sb, proc_dentry);
        if (!inode)
-               goto out_dput;
+               goto out;
 
        d_add(dentry, inode);
        return NULL;
 
- out_dput:
-       dput(proc_dentry);
  out:
        return ERR_PTR(err);
 }
@@ -690,8 +675,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
        struct inode *proc_ino = dentry->d_inode;
        struct inode *inode = new_inode(sb);
 
-       if (!inode)
+       if (!inode) {
+               dput(dentry);
                return ERR_PTR(-ENOMEM);
+       }
 
        if (S_ISDIR(dentry->d_inode->i_mode)) {
                inode->i_op = &hppfs_dir_iops;
@@ -704,7 +691,7 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
                inode->i_fop = &hppfs_file_fops;
        }
 
-       HPPFS_I(inode)->proc_dentry = dget(dentry);
+       HPPFS_I(inode)->proc_dentry = dentry;
 
        inode->i_uid = proc_ino->i_uid;
        inode->i_gid = proc_ino->i_gid;
@@ -737,7 +724,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
        sb->s_fs_info = proc_mnt;
 
        err = -ENOMEM;
-       root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root);
+       root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root));
        if (!root_inode)
                goto out_mntput;
 
index c88eab55aec95f4ab26427da882fd88c8b3ef512..275ca4749a2ee3280fd0544680c1df4f23baf3d5 100644 (file)
@@ -822,7 +822,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
                goto out;
 
        attr->set_buf[size] = '\0';
-       val = simple_strtol(attr->set_buf, NULL, 0);
+       val = simple_strtoll(attr->set_buf, NULL, 0);
        ret = attr->set(attr->data, val);
        if (ret == 0)
                ret = len; /* on success, claim we got the whole input */
index 0223c41fb1146cb529a92c784912498e15829a00..14ab8d3f2f0c8f7fc3e829ed26404e53a2420028 100644 (file)
@@ -433,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
                        goto err_parent;
                BUG_ON(nd->inode != parent->d_inode);
        } else {
+               if (dentry->d_parent != parent)
+                       goto err_parent;
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!__d_rcu_to_refcount(dentry, nd->seq))
                        goto err_child;
@@ -940,7 +942,6 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                 * Don't forget we might have a non-mountpoint managed dentry
                 * that wants to block transit.
                 */
-               *inode = path->dentry->d_inode;
                if (unlikely(managed_dentry_might_block(path->dentry)))
                        return false;
 
@@ -953,6 +954,12 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                path->mnt = mounted;
                path->dentry = mounted->mnt_root;
                nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+               /*
+                * Update the inode too. We don't need to re-check the
+                * dentry sequence number here after this d_inode read,
+                * because a mount-point is always pinned.
+                */
+               *inode = path->dentry->d_inode;
        }
        return true;
 }
index 0bafcc91c27f8d8513dc1a7c8776fefd3411eee2..f9d03abcd04cd803f7ec69fcd81a84eba2d7f66b 100644 (file)
@@ -398,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
         * this offset and save the original offset.
         */
        data->args.offset = filelayout_get_dserver_offset(lseg, offset);
-       data->mds_offset = offset;
 
        /* Perform an asynchronous write */
        status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
index 6870bc61ceec4083f80818c4a31f934dab3227b1..e6e8f3b9a1dea29908a5179a38293dfbefce0ad8 100644 (file)
@@ -91,7 +91,7 @@ static int nfs4_stat_to_errno(int);
 #define encode_getfh_maxsz      (op_encode_hdr_maxsz)
 #define decode_getfh_maxsz      (op_decode_hdr_maxsz + 1 + \
                                ((3+NFS4_FHSIZE) >> 2))
-#define nfs4_fattr_bitmap_maxsz 3
+#define nfs4_fattr_bitmap_maxsz 4
 #define encode_getattr_maxsz    (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
 #define nfs4_name_maxsz                (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
 #define nfs4_path_maxsz                (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
index e268e3b23497282f02e7cb9d209f189a886c60a7..727168059684e92b22b0c1015fc229668a4bf07d 100644 (file)
@@ -864,6 +864,8 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 
        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
+       /* pnfs_set_layoutcommit needs this */
+       data->mds_offset = data->args.offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
index 29309e25417fdf30209f69d301ea55e4daaed401..b57aab9a1184719a027715d16786a54cdbd43d70 100644 (file)
@@ -56,16 +56,12 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
 
        lock_ufs(dir->i_sb);
        ino = ufs_inode_by_name(dir, &dentry->d_name);
-       if (ino) {
+       if (ino)
                inode = ufs_iget(dir->i_sb, ino);
-               if (IS_ERR(inode)) {
-                       unlock_ufs(dir->i_sb);
-                       return ERR_CAST(inode);
-               }
-       }
        unlock_ufs(dir->i_sb);
-       d_add(dentry, inode);
-       return NULL;
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
+       return d_splice_alias(inode, dentry);
 }
 
 /*
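Ending ufs_lookup() in d_splice_alias() rather than d_add() folds three
cases into one call: a NULL inode yields a negative dentry, an inode that
already has an alias gets that alias returned for the VFS to use, and the
inode is attached otherwise. The canonical shape of a ->lookup built on it
(find_entry() and sketch_iget() are invented stand-ins):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/namei.h>

struct inode *sketch_iget(struct super_block *sb, ino_t ino); /* invented */
ino_t find_entry(struct inode *dir, const struct qstr *name); /* invented */

static struct dentry *sketch_lookup(struct inode *dir, struct dentry *dentry,
				    struct nameidata *nd)
{
	struct inode *inode = NULL;
	ino_t ino = find_entry(dir, &dentry->d_name);

	if (ino)
		inode = sketch_iget(dir->i_sb, ino);	/* may be ERR_PTR */
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_splice_alias(inode, dentry);	/* NULL/alias/attach */
}
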
index 3a10ef5914eb5ab67dc981e3f6d21ee4cf0b3908..6cd5b6403a7b9f37eac4317807c97003c79cd9ba 100644 (file)
@@ -210,7 +210,7 @@ struct acpi_device_power_state {
 struct acpi_device_power {
        int state;              /* Current state */
        struct acpi_device_power_flags flags;
-       struct acpi_device_power_state states[4];       /* Power states (D0-D3) */
+       struct acpi_device_power_state states[ACPI_D_STATE_COUNT];      /* Power states (D0-D3Cold) */
 };
 
 /* Performance Management */
index a756bc8d866db15af5fbe3c2813e873b48aa48d7..4543b6f75867dfa4e6695e52e764cdd20c8f778d 100644 (file)
@@ -98,8 +98,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
 /*
  * Spinlock primitives
  */
+
+#ifndef acpi_os_create_lock
 acpi_status
 acpi_os_create_lock(acpi_spinlock *out_handle);
+#endif
 
 void acpi_os_delete_lock(acpi_spinlock handle);
 
index 5d2a5e9544d9d4743202852cde379a60fb2f7600..2ce1be9f62918c38a52edf3d8424af1e4c48d25a 100644 (file)
@@ -159,6 +159,24 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
        } while (0)
 #endif
 
+/*
+ * When lockdep is enabled, the spin_lock_init() macro stringifies its
+ * argument and uses that as a name for the lock in debugging.
+ * By executing spin_lock_init() in a macro, the key changes from "lock" for
+ * all locks to the name of the argument of acpi_os_create_lock(), which
+ * prevents lockdep from reporting false positives for ACPICA locks.
+ */
+#define acpi_os_create_lock(__handle)                          \
+({                                                             \
+       spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock));        \
+                                                               \
+       if (lock) {                                             \
+               *(__handle) = lock;                             \
+               spin_lock_init(*(__handle));                    \
+       }                                                       \
+       lock ? AE_OK : AE_NO_MEMORY;                            \
+})
+
 #endif /* __KERNEL__ */
 
 #endif /* __ACLINUX_H__ */
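
The reason this must be a macro, as the comment says, is that
spin_lock_init() is itself a macro that stringifies its argument to name
the lock class for lockdep; because the handle is a macro parameter, it is
substituted textually before spin_lock_init() expands, so each call site's
variable name becomes the lock's name. The same device in generic form
(create_named_lock() is an invented name):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Sketch: per-call-site lockdep names for dynamically allocated locks. */
#define create_named_lock(handle)				\
({								\
	spinlock_t *lock = kmalloc(sizeof(*lock), GFP_KERNEL);	\
								\
	if (lock) {						\
		*(handle) = lock;				\
		/* expands with the caller's argument text */	\
		spin_lock_init(*(handle));			\
	}							\
	lock ? 0 : -ENOMEM;					\
})
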
index e08f344c6cffc546660506b37e8659679ac78211..3d53efd25ab906889e081acb8ae10f1065ef180a 100644 (file)
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
index c6927a4d157fd3cfb9f4a963065353c730364b99..6ad43554ac0521293772db9025261a854ca82593 100644 (file)
@@ -64,6 +64,19 @@ struct mmc_ext_csd {
        unsigned long long      enhanced_area_offset;   /* Units: Byte */
        unsigned int            enhanced_area_size;     /* Units: KB */
        unsigned int            boot_size;              /* in bytes */
+       u8                      raw_partition_support;  /* 160 */
+       u8                      raw_erased_mem_count;   /* 181 */
+       u8                      raw_ext_csd_structure;  /* 194 */
+       u8                      raw_card_type;          /* 196 */
+       u8                      raw_s_a_timeout;                /* 217 */
+       u8                      raw_hc_erase_gap_size;  /* 221 */
+       u8                      raw_erase_timeout_mult; /* 223 */
+       u8                      raw_hc_erase_grp_size;  /* 224 */
+       u8                      raw_sec_trim_mult;      /* 229 */
+       u8                      raw_sec_erase_mult;     /* 230 */
+       u8                      raw_sec_feature_support;/* 231 */
+       u8                      raw_trim_mult;          /* 232 */
+       u8                      raw_sectors[4];         /* 212 - 4 bytes */
 };
 
 struct sd_scr {
index 54b8b4d7b68f1a2a42f68de4128f563f9f86eb4e..9e19477991ad87e8f1b0421742a4243131755495 100644 (file)
@@ -1097,12 +1097,6 @@ struct net_device {
 #define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
                                 NETIF_F_FSO)
 
-#define NETIF_F_ALL_TX_OFFLOADS        (NETIF_F_ALL_CSUM | NETIF_F_SG | \
-                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
-                                NETIF_F_HIGHDMA | \
-                                NETIF_F_SCTP_CSUM | \
-                                NETIF_F_ALL_FCOE)
-
        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
index 496770a96487676ec12f222d97a4af8defcf341b..14a6c7b545de5bf3ad51dea50d5d484cfc8bfbb5 100644 (file)
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE           0x0400  /* Only a single load balancing instance */
 #define SD_ASYM_PACKING                0x0800  /* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING      0x1000  /* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP             0x2000  /* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
        POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
        return 0;
 }
 
-struct sched_group {
-       struct sched_group *next;       /* Must be a circular list */
+struct sched_group_power {
        atomic_t ref;
-
        /*
         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
         * single CPU.
         */
-       unsigned int cpu_power, cpu_power_orig;
+       unsigned int power, power_orig;
+};
+
+struct sched_group {
+       struct sched_group *next;       /* Must be a circular list */
+       atomic_t ref;
+
        unsigned int group_weight;
+       struct sched_group_power *sgp;
 
        /*
         * The CPUs this group covers.
@@ -1254,6 +1260,9 @@ struct task_struct {
 #ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+       int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
        struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
index 564acd3a71c1a89b0dcc9175b8f3068cf9e086c7..9995c7fc3f60ce08e2b7474fc6cd955ce736e7a5 100644 (file)
@@ -112,11 +112,7 @@ struct sdla_dlci_conf {
    short Tb_max;
 };
 
-#ifndef __KERNEL__
-
-void sdla(void *cfg_info, char *dev, struct frad_conf *conf, int quiet);
-
-#else
+#ifdef __KERNEL__
 
 /* important Z80 window addresses */
 #define SDLA_CONTROL_WND               0xE000
index dd6847e5d6e46264ffe6db00ccb7128ce61483fa..6506458ccd33bbc3df02f76f8ae661c2fafd18d9 100644 (file)
@@ -63,6 +63,7 @@ typedef enum {
        SCTP_CMD_ECN_ECNE,      /* Do delayed ECNE processing. */
        SCTP_CMD_ECN_CWR,       /* Do delayed CWR processing.  */
        SCTP_CMD_TIMER_START,   /* Start a timer.  */
+       SCTP_CMD_TIMER_START_ONCE, /* Start a timer once.  */
        SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
        SCTP_CMD_TIMER_STOP,    /* Stop a timer. */
        SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
index 99b027b2adce972e3df9c4b93e9800ae8fa820bf..ca4693b4e09e4bb879c0ef8e483247c913bffb6b 100644 (file)
@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
        const struct sctp_association *asoc,
index 7e59ffb3d0ba487c0474270a476b25cbc96d2ac1..ba06207b1dd3bf9f9d42bc8800998b80b909c02d 100644 (file)
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned.  So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier().  When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods.  This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks.  So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one.  We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
 #ifdef CONFIG_RCU_BOOST
 
 /*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+       if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+               return;
        if (likely(!rsp->boost)) {
                rcu_do_batch(rsp, rdp);
                return;
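
A minimal sketch (not kernel code) of the gating pattern added above: work that needs a fully functional scheduler is silently deferred until a boot-phase flag flips, and the flag is re-read on every invocation. A volatile read stands in for ACCESS_ONCE(); this illustrates the control flow only, not the kernel's memory-ordering guarantees.

#include <stdio.h>

static int fully_active;        /* set once early boot setup completes */

#define READ_FLAG(x)    (*(volatile int *)&(x))

static void invoke_callbacks(void)
{
        if (!READ_FLAG(fully_active)) {
                printf("too early: callbacks stay queued\n");
                return;
        }
        printf("running callbacks\n");
}

int main(void)
{
        invoke_callbacks();     /* deferred */
        fully_active = 1;       /* the early_initcall() moment */
        invoke_callbacks();     /* now runs */
        return 0;
}
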
index 14dc7dd0090220f717f83f666fb2a30a32f66143..8aafbb80b8b093e1072f2fcc4dc66bf40f249b7f 100644 (file)
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
        struct rcu_data *rdp;
        struct rcu_node *rnp;
 
-       if (t->rcu_read_lock_nesting &&
+       if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       } else if (t->rcu_read_lock_nesting < 0 &&
+                  t->rcu_read_unlock_special) {
+
+               /*
+                * Complete the exit from an RCU read-side critical section
+                * on behalf of a preempted instance of __rcu_read_unlock().
+                */
+               rcu_read_unlock_special(t);
        }
 
        /*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block. */
-       if (in_irq()) {
+       if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
+               /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+               if (t->rcu_boosted) {
+                       special |= RCU_READ_UNLOCK_BOOSTED;
+                       t->rcu_boosted = 0;
+               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
                t->rcu_blocked_node = NULL;
 
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
                if (special & RCU_READ_UNLOCK_BOOSTED) {
-                       t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
                        rt_mutex_unlock(t->rcu_boost_mutex);
                        t->rcu_boost_mutex = NULL;
                }
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
        struct task_struct *t = current;
 
        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       --t->rcu_read_lock_nesting;
-       barrier();  /* decrement before load of ->rcu_read_unlock_special */
-       if (t->rcu_read_lock_nesting == 0 &&
-           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-               rcu_read_unlock_special(t);
+       if (t->rcu_read_lock_nesting != 1)
+               --t->rcu_read_lock_nesting;
+       else {
+               t->rcu_read_lock_nesting = INT_MIN;
+               barrier();  /* assign before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+               barrier();  /* ->rcu_read_unlock_special load before assign */
+               t->rcu_read_lock_nesting = 0;
+       }
 #ifdef CONFIG_PROVE_LOCKING
-       WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+       {
+               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+       }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
                rcu_preempt_qs(cpu);
                return;
        }
-       if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+       if (t->rcu_read_lock_nesting > 0 &&
+           per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -695,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp))
+               if (!sync_rcu_preempt_exp_done(rnp)) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
+               }
                if (rnp->parent == NULL) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
@@ -707,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
@@ -1174,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
-       t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+       t->rcu_boosted = 1;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -1532,7 +1557,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!rcu_kthreads_spawnable ||
+       if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
        t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1664,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!rcu_kthreads_spawnable ||
+       if (!rcu_scheduler_fully_active ||
            rnp->qsmaskinit == 0)
                return 0;
        if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1690,7 @@ static int __init rcu_spawn_kthreads(void)
        int cpu;
        struct rcu_node *rnp;
 
-       rcu_kthreads_spawnable = 1;
+       rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu) {
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                if (cpu_online(cpu))
@@ -1687,7 +1712,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
        struct rcu_node *rnp = rdp->mynode;
 
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_kthreads_spawnable) {
+       if (rcu_scheduler_fully_active) {
                (void)rcu_spawn_one_cpu_kthread(cpu);
                if (rnp->node_kthread_task == NULL)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1751,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+       rcu_scheduler_fully_active = 1;
+       return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
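
A sketch of the INT_MIN trick that the __rcu_read_unlock() change above relies on: while the outermost unlock runs its slow path, the nesting counter is parked at INT_MIN so that code running in that window (here a simulated interrupt) can distinguish "unlock in progress" from an ordinary nested reader. Single-threaded and purely illustrative.

#include <limits.h>
#include <stdio.h>

static int nesting;

static void maybe_interrupt(void)
{
        if (nesting < 0)
                printf("interrupt: outermost unlock in progress, "
                       "finish special work on its behalf\n");
        else if (nesting > 0)
                printf("interrupt: inside a reader, do nothing\n");
}

static void read_unlock(void)
{
        if (nesting != 1) {
                --nesting;              /* nested: just count down */
        } else {
                nesting = INT_MIN;      /* mark the slow path active */
                maybe_interrupt();      /* window an IRQ could hit */
                nesting = 0;            /* fully unlocked */
        }
}

int main(void)
{
        nesting = 2;            /* two nested read-side sections */
        read_unlock();          /* 2 -> 1, fast path */
        read_unlock();          /* outermost, takes the slow path */
        return 0;
}
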
index 9769c756ad6650734f9f038db30064d1c38232f5..fde6ff90352583d65ff890a407200f2fb0c3073e 100644 (file)
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
        struct rq *rq = this_rq();
-       struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-       if (!list)
-               return;
 
        raw_spin_lock(&rq->lock);
 
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
        raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+       struct rq *rq = this_rq();
+       struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+       if (!list)
+               return;
+
+       sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-       sched_ttwu_pending();
+       struct rq *rq = this_rq();
+       struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+       if (!list)
+               return;
+
+       /*
+        * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+        * traditionally all their work was done from the interrupt return
+        * path. Now that we actually do some work, we need to make sure
+        * we do call them.
+        *
+        * Some archs already call them; luckily, irq_enter()/irq_exit()
+        * nest properly.
+        *
+        * Arguably we should visit all archs and update all handlers;
+        * however, a fair share of IPIs are still resched-only, so this
+        * would somewhat pessimize the simple resched case.
+        */
+       irq_enter();
+       sched_ttwu_do_pending(list);
+       irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
@@ -6557,7 +6589,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->cpu_power) {
+               if (!group->sgp->power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -6581,9 +6613,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
                printk(KERN_CONT " %s", str);
-               if (group->cpu_power != SCHED_POWER_SCALE) {
+               if (group->sgp->power != SCHED_POWER_SCALE) {
                        printk(KERN_CONT " (cpu_power = %d)",
-                               group->cpu_power);
+                               group->sgp->power);
                }
 
                group = group->next;
@@ -6774,11 +6806,39 @@ static struct root_domain *alloc_rootdomain(void)
        return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+       struct sched_group *tmp, *first;
+
+       if (!sg)
+               return;
+
+       first = sg;
+       do {
+               tmp = sg->next;
+
+               if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+                       kfree(sg->sgp);
+
+               kfree(sg);
+               sg = tmp;
+       } while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
        struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-       if (atomic_dec_and_test(&sd->groups->ref))
+
+       /*
+        * If it's an overlapping domain it has private groups; iterate
+        * and nuke them all.
+        */
+       if (sd->flags & SD_OVERLAP) {
+               free_sched_groups(sd->groups, 1);
+       } else if (atomic_dec_and_test(&sd->groups->ref)) {
+               kfree(sd->groups->sgp);
                kfree(sd->groups);
+       }
        kfree(sd);
 }
 
@@ -6945,6 +7005,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
        struct sched_domain **__percpu sd;
        struct sched_group **__percpu sg;
+       struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6964,15 +7025,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP   0x01
+
 struct sched_domain_topology_level {
        sched_domain_init_f init;
        sched_domain_mask_f mask;
+       int                 flags;
        struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+       struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+       const struct cpumask *span = sched_domain_span(sd);
+       struct cpumask *covered = sched_domains_tmpmask;
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *child;
+       int i;
+
+       cpumask_clear(covered);
+
+       for_each_cpu(i, span) {
+               struct cpumask *sg_span;
+
+               if (cpumask_test_cpu(i, covered))
+                       continue;
+
+               sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                               GFP_KERNEL, cpu_to_node(i));
+
+               if (!sg)
+                       goto fail;
+
+               sg_span = sched_group_cpus(sg);
+
+               child = *per_cpu_ptr(sdd->sd, i);
+               if (child->child) {
+                       child = child->child;
+                       cpumask_copy(sg_span, sched_domain_span(child));
+               } else
+                       cpumask_set_cpu(i, sg_span);
+
+               cpumask_or(covered, covered, sg_span);
+
+               sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+               atomic_inc(&sg->sgp->ref);
+
+               if (cpumask_test_cpu(cpu, sg_span))
+                       groups = sg;
+
+               if (!first)
+                       first = sg;
+               if (last)
+                       last->next = sg;
+               last = sg;
+               last->next = first;
+       }
+       sd->groups = groups;
+
+       return 0;
+
+fail:
+       free_sched_groups(first, 0);
+
+       return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7100,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
        if (child)
                cpu = cpumask_first(sched_domain_span(child));
 
-       if (sg)
+       if (sg) {
                *sg = *per_cpu_ptr(sdd->sg, cpu);
+               (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+               atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+       }
 
        return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
        struct sched_group *first = NULL, *last = NULL;
        struct sd_data *sdd = sd->private;
@@ -7006,6 +7125,12 @@ build_sched_groups(struct sched_domain *sd)
        struct cpumask *covered;
        int i;
 
+       get_group(cpu, sdd, &sd->groups);
+       atomic_inc(&sd->groups->ref);
+
+       if (cpu != cpumask_first(sched_domain_span(sd)))
+               return 0;
+
        lockdep_assert_held(&sched_domains_mutex);
        covered = sched_domains_tmpmask;
 
@@ -7020,7 +7145,7 @@ build_sched_groups(struct sched_domain *sd)
                        continue;
 
                cpumask_clear(sched_group_cpus(sg));
-               sg->cpu_power = 0;
+               sg->sgp->power = 0;
 
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7162,8 @@ build_sched_groups(struct sched_domain *sd)
                last = sg;
        }
        last->next = first;
+
+       return 0;
 }
 
 /*
@@ -7051,12 +7178,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-       WARN_ON(!sd || !sd->groups);
+       struct sched_group *sg = sd->groups;
 
-       if (cpu != group_first_cpu(sd->groups))
-               return;
+       WARN_ON(!sd || !sg);
+
+       do {
+               sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+               sg = sg->next;
+       } while (sg != sd->groups);
 
-       sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+       if (cpu != group_first_cpu(sg))
+               return;
 
        update_group_power(sd, cpu);
 }
@@ -7177,15 +7309,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
        struct sd_data *sdd = sd->private;
-       struct sched_group *sg = sd->groups;
 
        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
        *per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-       if (cpu == cpumask_first(sched_group_cpus(sg))) {
-               WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+       if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;
-       }
+
+       if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+               *per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7342,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
        { sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-       { sd_init_NODE, cpu_node_mask, },
+       { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
        { sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
        { NULL, },
@@ -7234,9 +7366,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                if (!sdd->sg)
                        return -ENOMEM;
 
+               sdd->sgp = alloc_percpu(struct sched_group_power *);
+               if (!sdd->sgp)
+                       return -ENOMEM;
+
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
+                       struct sched_group_power *sgp;
 
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7388,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                                return -ENOMEM;
 
                        *per_cpu_ptr(sdd->sg, j) = sg;
+
+                       sgp = kzalloc_node(sizeof(struct sched_group_power),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sgp)
+                               return -ENOMEM;
+
+                       *per_cpu_ptr(sdd->sgp, j) = sgp;
                }
        }
 
@@ -7266,11 +7410,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
                struct sd_data *sdd = &tl->data;
 
                for_each_cpu(j, cpu_map) {
-                       kfree(*per_cpu_ptr(sdd->sd, j));
+                       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+                       if (sd && (sd->flags & SD_OVERLAP))
+                               free_sched_groups(sd->groups, 0);
                        kfree(*per_cpu_ptr(sdd->sg, j));
+                       kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
                free_percpu(sdd->sg);
+               free_percpu(sdd->sgp);
        }
 }
 
@@ -7316,8 +7464,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                struct sched_domain_topology_level *tl;
 
                sd = NULL;
-               for (tl = sched_domain_topology; tl->init; tl++)
+               for (tl = sched_domain_topology; tl->init; tl++) {
                        sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+                       if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+                               sd->flags |= SD_OVERLAP;
+                       if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+                               break;
+               }
 
                while (sd->child)
                        sd = sd->child;
@@ -7329,13 +7482,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
        for_each_cpu(i, cpu_map) {
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        sd->span_weight = cpumask_weight(sched_domain_span(sd));
-                       get_group(i, sd->private, &sd->groups);
-                       atomic_inc(&sd->groups->ref);
-
-                       if (i != cpumask_first(sched_domain_span(sd)))
-                               continue;
-
-                       build_sched_groups(sd);
+                       if (sd->flags & SD_OVERLAP) {
+                               if (build_overlap_sched_groups(sd, i))
+                                       goto error;
+                       } else {
+                               if (build_sched_groups(sd, i))
+                                       goto error;
+                       }
                }
        }
 
@@ -7757,6 +7910,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
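
A user-space sketch of the xchg()-claimed wake list that scheduler_ipi() uses above: producers push onto a lock-free list head, and the handler takes the whole list in one atomic exchange before walking it (the kernel brackets that walk with irq_enter()/irq_exit()). C11 atomics stand in for the kernel primitives; the demo is single-threaded.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct task {
        const char *name;
        struct task *wake_next;
};

static _Atomic(struct task *) wake_list;

static void push_task(struct task *t)
{
        struct task *old = atomic_load(&wake_list);

        do {
                t->wake_next = old;
        } while (!atomic_compare_exchange_weak(&wake_list, &old, t));
}

static void ipi_handler(void)
{
        /* claim everything pending in one shot, like xchg(&rq->wake_list, NULL) */
        struct task *list = atomic_exchange(&wake_list, NULL);

        if (!list)
                return;                 /* nothing to do */

        /* irq_enter() would go here in the kernel */
        for (; list; list = list->wake_next)
                printf("waking %s\n", list->name);
        /* irq_exit() would go here */
}

int main(void)
{
        struct task a = { "a", NULL }, b = { "b", NULL };

        push_task(&a);
        push_task(&b);
        ipi_handler();          /* wakes b then a */
        ipi_handler();          /* empty: early return */
        return 0;
}
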
index 433491c2dc8f5c9952655de72958c7019dadd57f..c768588e180b5ae7a83bebad45ace3ac34da0d19 100644 (file)
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                }
 
                /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+               avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
 
                if (local_group) {
                        this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
                power >>= SCHED_POWER_SHIFT;
        }
 
-       sdg->cpu_power_orig = power;
+       sdg->sgp->power_orig = power;
 
        if (sched_feat(ARCH_POWER))
                power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
                power = 1;
 
        cpu_rq(cpu)->cpu_power = power;
-       sdg->cpu_power = power;
+       sdg->sgp->power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
        group = child->groups;
        do {
-               power += group->cpu_power;
+               power += group->sgp->power;
                group = group->next;
        } while (group != child->groups);
 
-       sdg->cpu_power = power;
+       sdg->sgp->power = power;
 }
 
 /*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
        /*
         * If ~90% of the cpu_power is still there, we're good.
         */
-       if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+       if (group->sgp->power * 32 > group->sgp->power_orig * 29)
                return 1;
 
        return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        }
 
        /* Adjust by relative CPU power of the group */
-       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
 
        /*
         * Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
                sgs->group_imb = 1;
 
-       sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+       sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
                                                SCHED_POWER_SCALE);
        if (!sgs->group_capacity)
                sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        return;
 
                sds->total_load += sgs.group_load;
-               sds->total_pwr += sg->cpu_power;
+               sds->total_pwr += sg->sgp->power;
 
                /*
                 * In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
        if (this_cpu > busiest_cpu)
                return 0;
 
-       *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+       *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
                                       SCHED_POWER_SCALE);
        return 1;
 }
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
        scaled_busy_load_per_task = sds->busiest_load_per_task
                                         * SCHED_POWER_SCALE;
-       scaled_busy_load_per_task /= sds->busiest->cpu_power;
+       scaled_busy_load_per_task /= sds->busiest->sgp->power;
 
        if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
                        (scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
         * moving them.
         */
 
-       pwr_now += sds->busiest->cpu_power *
+       pwr_now += sds->busiest->sgp->power *
                        min(sds->busiest_load_per_task, sds->max_load);
-       pwr_now += sds->this->cpu_power *
+       pwr_now += sds->this->sgp->power *
                        min(sds->this_load_per_task, sds->this_load);
        pwr_now /= SCHED_POWER_SCALE;
 
        /* Amount of load we'd subtract */
        tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-               sds->busiest->cpu_power;
+               sds->busiest->sgp->power;
        if (sds->max_load > tmp)
-               pwr_move += sds->busiest->cpu_power *
+               pwr_move += sds->busiest->sgp->power *
                        min(sds->busiest_load_per_task, sds->max_load - tmp);
 
        /* Amount of load we'd add */
-       if (sds->max_load * sds->busiest->cpu_power <
+       if (sds->max_load * sds->busiest->sgp->power <
                sds->busiest_load_per_task * SCHED_POWER_SCALE)
-               tmp = (sds->max_load * sds->busiest->cpu_power) /
-                       sds->this->cpu_power;
+               tmp = (sds->max_load * sds->busiest->sgp->power) /
+                       sds->this->sgp->power;
        else
                tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-                       sds->this->cpu_power;
-       pwr_move += sds->this->cpu_power *
+                       sds->this->sgp->power;
+       pwr_move += sds->this->sgp->power *
                        min(sds->this_load_per_task, sds->this_load + tmp);
        pwr_move /= SCHED_POWER_SCALE;
 
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
                load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-               load_above_capacity /= sds->busiest->cpu_power;
+               load_above_capacity /= sds->busiest->sgp->power;
        }
 
        /*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
        max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * sds->busiest->cpu_power,
-               (sds->avg_load - sds->this_load) * sds->this->cpu_power)
+       *imbalance = min(max_pull * sds->busiest->sgp->power,
+               (sds->avg_load - sds->this_load) * sds->this->sgp->power)
                        / SCHED_POWER_SCALE;
 
        /*
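
A worked example of the fix_small_capacity() test that now reads group->sgp->power: "power * 32 > power_orig * 29" keeps a group usable while it retains more than 29/32 (about 90.6%) of its original power, using integer math only. The sample values are made up.

#include <stdio.h>

static int power_ok(unsigned int power, unsigned int power_orig)
{
        return power * 32 > power_orig * 29;
}

int main(void)
{
        /* 1024 is the SCHED_POWER_SCALE-style baseline */
        printf("%d\n", power_ok(1024, 1024));   /* 1: full power */
        printf("%d\n", power_ok(930, 1024));    /* 1: ~90.8% remains */
        printf("%d\n", power_ok(920, 1024));    /* 0: below 29/32 */
        return 0;
}
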
index be40f7371ee1ac2e2d91c6679bafbf798606e08e..1e7066d76c268c33bb17316b040e480389eac7b8 100644 (file)
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)
index ff7678603328b3ba5e00c74dc9fd08bcbf113987..415d85d6f6c637b099826d012e46f70832b1d557 100644 (file)
@@ -1178,18 +1178,25 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 {
        struct sighand_struct *sighand;
 
-       rcu_read_lock();
        for (;;) {
+               local_irq_save(*flags);
+               rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
-               if (unlikely(sighand == NULL))
+               if (unlikely(sighand == NULL)) {
+                       rcu_read_unlock();
+                       local_irq_restore(*flags);
                        break;
+               }
 
-               spin_lock_irqsave(&sighand->siglock, *flags);
-               if (likely(sighand == tsk->sighand))
+               spin_lock(&sighand->siglock);
+               if (likely(sighand == tsk->sighand)) {
+                       rcu_read_unlock();
                        break;
-               spin_unlock_irqrestore(&sighand->siglock, *flags);
+               }
+               spin_unlock(&sighand->siglock);
+               rcu_read_unlock();
+               local_irq_restore(*flags);
        }
-       rcu_read_unlock();
 
        return sighand;
 }
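
A sketch of the lock-and-revalidate loop that __lock_task_sighand() now runs with interrupts disabled around the RCU dereference: snapshot the pointer, lock the object it names, then recheck that the pointer still points there; otherwise drop the lock and retry. pthreads stand in for siglock and RCU, the types are hypothetical, and the demo is single-threaded.

#include <pthread.h>
#include <stdio.h>

struct handler {
        pthread_mutex_t lock;
};

struct task {
        struct handler *sighand;        /* may be switched concurrently */
};

static struct handler *lock_task_handler(struct task *t)
{
        struct handler *h;

        for (;;) {
                h = t->sighand;                 /* snapshot */
                if (h == NULL)
                        return NULL;            /* task is exiting */

                pthread_mutex_lock(&h->lock);
                if (h == t->sighand)
                        return h;               /* still current: success */

                /* changed under us: drop the lock and retry */
                pthread_mutex_unlock(&h->lock);
        }
}

int main(void)
{
        struct handler h = { PTHREAD_MUTEX_INITIALIZER };
        struct task t = { &h };
        struct handler *locked = lock_task_handler(&t);

        printf("locked: %s\n", locked ? "yes" : "no");
        if (locked)
                pthread_mutex_unlock(&locked->lock);
        return 0;
}
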
index 40cf63ddd4b3d740d2620ddbf1fa245830b1d703..fca82c32042b73133f2ab74838287c94cf8ad152 100644 (file)
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
 {
        if (!force_irqthreads)
                __do_softirq();
-       else
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
                wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
 }
 #else
 static inline void invoke_softirq(void)
 {
        if (!force_irqthreads)
                do_softirq();
-       else
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
                wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
 }
 #endif
 
index 5ed24b94c5e69a0dbe6c361ccb0665e3fcc591af..d036e59d302b092bc4186dbb753066416e35c9f6 100644 (file)
@@ -2310,7 +2310,8 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
        for (i = 0; i <= classzone_idx; i++)
                present_pages += pgdat->node_zones[i].present_pages;
 
-       return balanced_pages > (present_pages >> 2);
+       /* A special case here: if a zone has no pages, we consider it balanced */
+       return balanced_pages >= (present_pages >> 2);
 }
 
 /* is kswapd sleeping prematurely? */
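
A tiny illustration of the one-character vmscan fix above: with zero present pages, the old test "balanced > present/4" evaluates 0 > 0 and is false, so an empty node could never count as balanced; ">=" makes it true. Values are contrived.

#include <stdio.h>

int main(void)
{
        unsigned long balanced = 0, present = 0;

        printf("old: %d  new: %d\n",
               balanced >  (present >> 2),      /* 0: never balanced */
               balanced >= (present >> 2));     /* 1: balanced */
        return 0;
}
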
index 86bff9b1ac4741b7a6c36fe6c8c98d8cab7bc610..6e82148edfc8c1009adec988e57c35a224111e3c 100644 (file)
@@ -528,7 +528,11 @@ static int vlan_dev_init(struct net_device *dev)
                                          (1<<__LINK_STATE_DORMANT))) |
                      (1<<__LINK_STATE_PRESENT);
 
-       dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+       dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+                          NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+                          NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+                          NETIF_F_ALL_FCOE;
+
        dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
 
index d3a05b9ade7a487fe7c84680883cb3ceca7621a1..bcd158f40bb9e4d7a7fa5c167c1281779d60847d 100644 (file)
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
        hci_dev_put(hdev);
 
+       if (conn->handle == 0)
+               kfree(conn);
+
        return 0;
 }
 
index c405a954a603341b52b51a98592ecccd618398e9..43b4c2deb7cc05bdc875e3f7b23a999468f5c1a4 100644 (file)
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
        struct hidp_session *session = (struct hidp_session *) arg;
 
-       kthread_stop(session->task);
+       atomic_inc(&session->terminate);
+       wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
                skb_queue_purge(&session->ctrl_transmit);
                skb_queue_purge(&session->intr_transmit);
 
-               kthread_stop(session->task);
+               atomic_inc(&session->terminate);
+               wake_up_process(current);
        }
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
        add_wait_queue(sk_sleep(intr_sk), &intr_wait);
        session->waiting_for_startup = 0;
        wake_up_interruptible(&session->startup_queue);
-       while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!atomic_read(&session->terminate)) {
                if (ctrl_sk->sk_state != BT_CONNECTED ||
                                intr_sk->sk_state != BT_CONNECTED)
                        break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
                hidp_process_transmit(session);
 
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
        hid_destroy_device(session->hid);
        session->hid = NULL;
-       kthread_stop(session->task);
+       atomic_inc(&session->terminate);
+       wake_up_process(session->task);
 
 unlink:
        hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
                        skb_queue_purge(&session->ctrl_transmit);
                        skb_queue_purge(&session->intr_transmit);
 
-                       kthread_stop(session->task);
+                       atomic_inc(&session->terminate);
+                       wake_up_process(session->task);
                }
        } else
                err = -ENOENT;
index 19e95004b28654fb14bb009d40efeec81b56eca1..af1bcc823f26d8196587be3624b8f091f29f1d8f 100644 (file)
@@ -142,6 +142,7 @@ struct hidp_session {
        uint ctrl_mtu;
        uint intr_mtu;
 
+       atomic_t terminate;
        struct task_struct *task;
 
        unsigned char keys[8];
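
A user-space model of the kthread shutdown pattern these hidp changes switch to: instead of kthread_stop(), the owner bumps a terminate flag and wakes the worker so it can exit on its own schedule. A pthread condition variable approximates wake_up_process(); illustrative only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int terminate;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

static void *session(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!atomic_load(&terminate)) {
                /* process pending work here, then sleep until woken */
                pthread_cond_wait(&wake, &lock);
        }
        pthread_mutex_unlock(&lock);
        printf("session: terminating cleanly\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, session, NULL);

        /* the hidp_idle_timeout() equivalent: flag first, then wake */
        atomic_fetch_add(&terminate, 1);
        pthread_mutex_lock(&lock);
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}
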
index 56fdd9162da929fcacd0b31cd5e537c71a421c4b..7705e26e699f158f028c773a53d61ee5b08dba84 100644 (file)
@@ -620,7 +620,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                                        struct sock *parent = bt_sk(sk)->parent;
                                        rsp.result = cpu_to_le16(L2CAP_CR_PEND);
                                        rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-                                       parent->sk_data_ready(parent, 0);
+                                       if (parent)
+                                               parent->sk_data_ready(parent, 0);
 
                                } else {
                                        sk->sk_state = BT_CONFIG;
@@ -2323,7 +2324,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
        sk = chan->sk;
 
-       if (sk->sk_state != BT_CONFIG) {
+       if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
                struct l2cap_cmd_rej rej;
 
                rej.reason = cpu_to_le16(0x0002);
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
        /* Reject if config buffer is too small. */
        len = cmd_len - sizeof(*req);
-       if (chan->conf_len + len > sizeof(chan->conf_req)) {
+       if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
                l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
                                l2cap_build_conf_rsp(chan, rsp,
                                        L2CAP_CONF_REJECT, flags), rsp);
@@ -4009,7 +4010,8 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                        struct sock *parent = bt_sk(sk)->parent;
                                        res = L2CAP_CR_PEND;
                                        stat = L2CAP_CS_AUTHOR_PEND;
-                                       parent->sk_data_ready(parent, 0);
+                                       if (parent)
+                                               parent->sk_data_ready(parent, 0);
                                } else {
                                        sk->sk_state = BT_CONFIG;
                                        res = L2CAP_CR_SUCCESS;
index a3a3a31d3c37b0006d0e9bfe26fd06a7ebe3b47d..41466ccb972a6698416d007acf375a81de00b71f 100644 (file)
@@ -36,16 +36,19 @@ int ceph_flags_to_mode(int flags)
        if ((flags & O_DIRECTORY) == O_DIRECTORY)
                return CEPH_FILE_MODE_PIN;
 #endif
-       if ((flags & O_APPEND) == O_APPEND)
-               flags |= O_WRONLY;
 
-       if ((flags & O_ACCMODE) == O_RDWR)
-               mode = CEPH_FILE_MODE_RDWR;
-       else if ((flags & O_ACCMODE) == O_WRONLY)
+       switch (flags & O_ACCMODE) {
+       case O_WRONLY:
                mode = CEPH_FILE_MODE_WR;
-       else
+               break;
+       case O_RDONLY:
                mode = CEPH_FILE_MODE_RD;
-
+               break;
+       case O_RDWR:
+       case O_ACCMODE: /* this is what the VFS does */
+               mode = CEPH_FILE_MODE_RDWR;
+               break;
+       }
 #ifdef O_LAZY
        if (flags & O_LAZY)
                mode |= CEPH_FILE_MODE_LAZY;
index 58ffa7d069c791c7d2c2c681861212d806260956..669d2e32efb61475384c2134840ce43bf15e2eed 100644 (file)
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                local->sched_scan_ies.ie[i] = kzalloc(2 +
                                                      IEEE80211_MAX_SSID_LEN +
-                                                     local->scan_ies_len,
+                                                     local->scan_ies_len +
+                                                     req->ie_len,
                                                      GFP_KERNEL);
                if (!local->sched_scan_ies.ie[i]) {
                        ret = -ENOMEM;
index d91c1a26630dc07a00ec22547e13ca7a02620f33..8f6a302d2ac3b89d191708f6f7f8eea1409ced8b 100644 (file)
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int queue = rx->queue;
+
+       /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+       if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+               queue = 0;
 
        /*
         * it makes no sense to check for MIC errors on anything other
@@ -148,8 +153,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
        /* update IV in key information to be able to detect replays */
-       rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-       rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+       rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+       rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
        return RX_CONTINUE;
 
@@ -241,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       int queue = rx->queue;
+
+       /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+       if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+               queue = 0;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -261,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
        res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
                                          key, skb->data + hdrlen,
                                          skb->len - hdrlen, rx->sta->sta.addr,
-                                         hdr->addr1, hwaccel, rx->queue,
+                                         hdr->addr1, hwaccel, queue,
                                          &rx->tkip_iv32,
                                          &rx->tkip_iv16);
        if (res != TKIP_DECRYPT_OK)
index b4f3cf06d8da7afd6f497d8e3dc1f3e3c7031afd..08b3cead6503c62f91dc8e97d9b817de7a79ffb9 100644 (file)
@@ -500,23 +500,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * Note: Adler-32 is no longer applicable, as has been replaced
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
-       if (!sctp_checksum_disable &&
-           !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
-               __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+       if (!sctp_checksum_disable) {
+               if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+                       __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
-               /* 3) Put the resultant value into the checksum field in the
-                *    common header, and leave the rest of the bits unchanged.
-                */
-               sh->checksum = sctp_end_cksum(crc32);
-       } else {
-               if (dst->dev->features & NETIF_F_SCTP_CSUM) {
+                       /* 3) Put the resultant value into the checksum field in the
+                        *    common header, and leave the rest of the bits unchanged.
+                        */
+                       sh->checksum = sctp_end_cksum(crc32);
+               } else {
                        /* no need to seed pseudo checksum for SCTP */
                        nskb->ip_summed = CHECKSUM_PARTIAL;
                        nskb->csum_start = (skb_transport_header(nskb) -
                                            nskb->head);
                        nskb->csum_offset = offsetof(struct sctphdr, checksum);
-               } else {
-                       nskb->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
 
index 1c88c8911dc50095315bc02463f2b3e5e1535509..d03682109b7a0417ea6dc1007a6277a0468f0b1b 100644 (file)
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
        if (transport) {
                if (bytes_acked) {
+                       struct sctp_association *asoc = transport->asoc;
+
                        /* We may have counted DATA that was migrated
                         * to this transport due to a DEL-IP operation.
                         * Subtract those bytes, since they were never
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        transport->error_count = 0;
                        transport->asoc->overall_error_count = 0;
 
+                       /*
+                        * While in SHUTDOWN PENDING, we may have started
+                        * the T5 shutdown guard timer after reaching the
+                        * retransmission limit. Stop that timer as soon
+                        * as the receiver acknowledged any data.
+                        */
+                       if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+                           del_timer(&asoc->timers
+                               [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+                                       sctp_association_put(asoc);
+
                        /* Mark the destination transport address as
                         * active if it is not so marked.
                         */
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                         * A sender is doing zero window probing when the
                         * receiver's advertised window is zero, and there is
                         * only one data chunk in flight to the receiver.
+                        *
+                        * Allow the association to time out while in SHUTDOWN
+                        * PENDING or SHUTDOWN RECEIVED in case the receiver
+                        * stays in zero window mode forever.
                         */
                        if (!q->asoc->peer.rwnd &&
                            !list_empty(&tlist) &&
-                           (sack_ctsn+2 == q->asoc->next_tsn)) {
+                           (sack_ctsn+2 == q->asoc->next_tsn) &&
+                           q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
                                SCTP_DEBUG_PRINTK("%s: SACK received for zero "
                                                  "window probe: %u\n",
                                                  __func__, sack_ctsn);
index 534c2e5feb054c933cbd0dcf46a0cb66386646dd..6e0f88295aafdc7012bea1cef543285c5d991f52 100644 (file)
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
         * HEARTBEAT should clear the error counter of the destination
         * transport address to which the HEARTBEAT was sent.
-        * The association's overall error count is also cleared.
         */
        t->error_count = 0;
-       t->asoc->overall_error_count = 0;
+
+       /*
+        * Although RFC4960 specifies that the overall error count must
+        * be cleared when a HEARTBEAT ACK is received, we make an
+        * exception while in SHUTDOWN PENDING. If the peer keeps its
+        * window shut forever, we may never be able to transmit our
+        * outstanding data and rely on the retransmission limit being
+        * reached to shut down the association.
+        */
+       if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+               t->asoc->overall_error_count = 0;
 
        /* Clear the hb_sent flag to signal that we had a good
         * acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
                        break;
 
+               case SCTP_CMD_TIMER_START_ONCE:
+                       timer = &asoc->timers[cmd->obj.to];
+
+                       if (timer_pending(timer))
+                               break;
+                       /* fall through */
+
                case SCTP_CMD_TIMER_START:
                        timer = &asoc->timers[cmd->obj.to];
                        timeout = asoc->timeouts[cmd->obj.to];
index a297283154d5035c6342e54916f10445aa610d83..246117142b5c9eae0b7d4b472b630382d91f2e00 100644 (file)
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
         * The sender of the SHUTDOWN MAY also start an overall guard timer
         * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
         */
-       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
        if (asoc->autoclose)
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
        SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
-               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-                               SCTP_ERROR(ETIMEDOUT));
-               /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-                               SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-               return SCTP_DISPOSITION_DELETE_TCB;
+               if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+                       /*
+                        * We are here likely because the receiver had its rwnd
+                        * closed for a while and we have not been able to
+                        * transmit the locally queued data within the maximum
+                        * number of retransmission attempts.  Start the T5
+                        * shutdown guard timer to give the receiver one last
+                        * chance and some additional time to recover before
+                        * aborting.
+                        */
+                       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+                               SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+               } else {
+                       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                                       SCTP_ERROR(ETIMEDOUT));
+                       /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+                       sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                                       SCTP_PERR(SCTP_ERROR_NO_ERROR));
+                       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       return SCTP_DISPOSITION_DELETE_TCB;
+               }
        }
 
        /* E1) For the destination address for which the timer
index 0338dc6fdc9df8328b26a3e2bc2c18c0b82008ca..7c211a7f90f4d065eec82baa0cb751373e7eb0be 100644 (file)
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
        /* SCTP_STATE_ESTABLISHED */ \
        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
        /* SCTP_STATE_SHUTDOWN_PENDING */ \
-       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
        /* SCTP_STATE_SHUTDOWN_SENT */ \
        TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
        /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
index 08c6238802de1fad5b81d1e7943ebc61c7430952..d3ccf7973c597402ba6e0783ef583f40d87a39ca 100644 (file)
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
        struct list_head *pos, *temp;
+       unsigned int data_was_unread;
 
        SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
        ep = sctp_sk(sk)->ep;
 
+       /* Clean up any skbs sitting on the receive queue.  */
+       data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+       data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
        /* Walk all associations on an endpoint.  */
        list_for_each_safe(pos, temp, &ep->asocs) {
                asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        }
                }
 
-               if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+               if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+                   !skb_queue_empty(&asoc->ulpq.reasm) ||
+                   (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
                        struct sctp_chunk *chunk;
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        sctp_primitive_SHUTDOWN(asoc, NULL);
        }
 
-       /* Clean up any skbs sitting on the receive queue.  */
-       sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-       sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
        /* On a TCP-style socket, block for at most linger_time if set. */
        if (sctp_style(sk, TCP) && timeout)
                sctp_wait_for_close(sk, timeout);
index e70e5fc87890c92031ab4b8a10e8fe3b15c5567e..8a84017834c211a840e83c1e39bebdbb001ec5c4 100644 (file)
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
        struct sk_buff *skb;
-       while ((skb = skb_dequeue(list)) != NULL)
-               sctp_ulpevent_free(sctp_skb2event(skb));
+       unsigned int data_unread = 0;
+
+       while ((skb = skb_dequeue(list)) != NULL) {
+               struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+               if (!sctp_ulpevent_is_notification(event))
+                       data_unread += skb->len;
+
+               sctp_ulpevent_free(event);
+       }
+
+       return data_unread;
 }
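
A sketch of how sctp_close() consumes the new return value above: purge both receive queues first, and if any user data was still unread, abort the association instead of shutting down gracefully (mirroring TCP's reset-on-close-with-unread-data behaviour). The helper names are hypothetical stand-ins for the real socket internals.

#include <stdio.h>

/* pretend purge results for the two queues, in bytes of user data */
static unsigned int purge_receive_queue(void) { return 512; }
static unsigned int purge_pd_lobby(void)      { return 0; }

int main(void)
{
        unsigned int data_was_unread;

        data_was_unread  = purge_receive_queue();
        data_was_unread += purge_pd_lobby();

        if (data_was_unread)
                printf("close: unread data, send ABORT\n");
        else
                printf("close: clean, send SHUTDOWN\n");
        return 0;
}
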
index 9a80a922c5270ee78d8a76feebac4360c551da82..e45d2fbbe5a8b3e82e6c36f3385065967663bff3 100644 (file)
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
        u32 bind_version;
        struct rpc_xprt *xprt;
        struct rpc_clnt *rpcb_clnt;
-       static struct rpcbind_args *map;
+       struct rpcbind_args *map;
        struct rpc_task *child;
        struct sockaddr_storage addr;
        struct sockaddr *sap = (struct sockaddr *)&addr;
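
Dropping the stray `static` above is a real fix, not a cleanup: a static local is a single object shared by every invocation, so concurrent rpcbind lookups would all have stored into, and read back, the same `map` pointer. A small demonstration of why a static local breaks reentrancy (names are illustrative):

    #include <stdio.h>

    /* One shared `saved` slot for every caller, not one per call. */
    static const char *last_caller(const char *name)
    {
            static const char *saved = "nobody";

            const char *prev = saved;
            saved = name;
            return prev;
    }

    int main(void)
    {
            printf("%s\n", last_caller("task A"));  /* nobody */
            printf("%s\n", last_caller("task B"));  /* task A: B observes A's state */
            return 0;
    }
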
index a27406b1654f190f645e9b3c393b46f4b6d939fb..4814e246a874ac1c19c51c5c36e7bbcd60b2d373 100644
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
        BUG_ON(RPC_IS_QUEUED(task));
 
        for (;;) {
+               void (*do_action)(struct rpc_task *);
 
                /*
-                * Execute any pending callback.
+                * Execute any pending callback first.
                 */
-               if (task->tk_callback) {
-                       void (*save_callback)(struct rpc_task *);
-
-                       /*
-                        * We set tk_callback to NULL before calling it,
-                        * in case it sets the tk_callback field itself:
-                        */
-                       save_callback = task->tk_callback;
-                       task->tk_callback = NULL;
-                       save_callback(task);
-               } else {
+               do_action = task->tk_callback;
+               task->tk_callback = NULL;
+               if (do_action == NULL) {
                        /*
                         * Perform the next FSM step.
-                        * tk_action may be NULL when the task has been killed
-                        * by someone else.
+                        * tk_action may be NULL if the task has been killed.
+                        * In particular, note that rpc_killall_tasks may
+                        * do this at any time, so beware when dereferencing.
                         */
-                       if (task->tk_action == NULL)
+                       do_action = task->tk_action;
+                       if (do_action == NULL)
                                break;
-                       task->tk_action(task);
                }
+               do_action(task);
 
                /*
                 * Lockless check for whether task is sleeping or not.
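
The refactored loop above reads whichever of tk_callback or tk_action applies into a single local, clears tk_callback before calling it, and never dereferences tk_action twice, closing the window in which rpc_killall_tasks could NULL the pointer between the check and the call. A userspace sketch of the read-once shape (the locking and memory ordering the real code relies on are omitted):

    #include <stdio.h>

    struct task {
            void (*callback)(struct task *);
            void (*action)(struct task *);
    };

    static void run(struct task *t)
    {
            for (;;) {
                    /* Load the pointer once; test and call the same copy. */
                    void (*do_action)(struct task *) = t->callback;

                    t->callback = NULL;
                    if (do_action == NULL) {
                            do_action = t->action;  /* a killer may clear this */
                            if (do_action == NULL)
                                    break;          /* finished or killed */
                    }
                    do_action(t);
            }
    }

    static void step2(struct task *t) { puts("step2"); t->action = NULL; }
    static void step1(struct task *t) { puts("step1"); t->action = step2; }

    int main(void)
    {
            struct task t = { .callback = NULL, .action = step1 };

            run(&t);
            return 0;
    }
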
index c22ef3492ee6f0b8f58f1d663c995a977a7bcc6b..880dbe2e6f94979847df154fb0a35e4ed964df69 100644
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
        mutex_init(&rdev->mtx);
        mutex_init(&rdev->devlist_mtx);
+       mutex_init(&rdev->sched_scan_mtx);
        INIT_LIST_HEAD(&rdev->netdev_list);
        spin_lock_init(&rdev->bss_lock);
        INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
        rfkill_destroy(rdev->rfkill);
        mutex_destroy(&rdev->mtx);
        mutex_destroy(&rdev->devlist_mtx);
+       mutex_destroy(&rdev->sched_scan_mtx);
        list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
                cfg80211_put_bss(&scan->pub);
        cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
                ___cfg80211_scan_done(rdev, true);
        }
 
+       cfg80211_unlock_rdev(rdev);
+
+       mutex_lock(&rdev->sched_scan_mtx);
+
        if (WARN_ON(rdev->sched_scan_req &&
                    rdev->sched_scan_req->dev == wdev->netdev)) {
                __cfg80211_stop_sched_scan(rdev, false);
        }
 
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 
        mutex_lock(&rdev->devlist_mtx);
        rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                        break;
                case NL80211_IFTYPE_P2P_CLIENT:
                case NL80211_IFTYPE_STATION:
-                       cfg80211_lock_rdev(rdev);
+                       mutex_lock(&rdev->sched_scan_mtx);
                        __cfg80211_stop_sched_scan(rdev, false);
-                       cfg80211_unlock_rdev(rdev);
+                       mutex_unlock(&rdev->sched_scan_mtx);
 
                        wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
index 3dce1f167eba338a6a3c92d0c584636936600420..a570ff9214ec3e60d2a2c88fef441a88ef70967f 100644
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
        struct work_struct scan_done_wk;
        struct work_struct sched_scan_results_wk;
 
+       struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
        struct genl_info *testmode_info;
 #endif
index f07602d7bf68595fa73effe1ff90f759cd3f0447..cea338150d0564ee0103790eb80fc6af270e861b 100644
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
-       if (rdev->sched_scan_req)
-               return -EINPROGRESS;
-
        if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
                return -EINVAL;
 
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (ie_len > wiphy->max_scan_ie_len)
                return -EINVAL;
 
+       mutex_lock(&rdev->sched_scan_mtx);
+
+       if (rdev->sched_scan_req) {
+               err = -EINPROGRESS;
+               goto out;
+       }
+
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       if (!request) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        if (n_ssids)
                request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
        kfree(request);
 out:
+       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
                                   struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       int err;
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
            !rdev->ops->sched_scan_stop)
                return -EOPNOTSUPP;
 
-       return __cfg80211_stop_sched_scan(rdev, false);
+       mutex_lock(&rdev->sched_scan_mtx);
+       err = __cfg80211_stop_sched_scan(rdev, false);
+       mutex_unlock(&rdev->sched_scan_mtx);
+
+       return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
index 7a6c67667d708e97400a6974175323f7cfd712e6..ae0c2256ba3beef62c9bc3492d788fdd90882da2 100644
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            sched_scan_results_wk);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        /* we don't have sched_scan_req anymore if the scan is stopping */
        if (rdev->sched_scan_req)
                nl80211_send_sched_scan_results(rdev,
                                                rdev->sched_scan_req->dev);
 
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
        __cfg80211_stop_sched_scan(rdev, true);
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
        int err;
        struct net_device *dev;
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (!rdev->sched_scan_req)
                return 0;
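
The cfg80211 hunks above move all scheduled-scan state under a dedicated sched_scan_mtx instead of the global rdev lock: the cleanup worker now drops the rdev mutex before touching sched_scan_req, the netdev notifier and the results worker take only the new mutex, and the EINPROGRESS check in nl80211_start_sched_scan is serialized against concurrent stop requests, with lockdep_assert_held documenting the new invariant. A pthread sketch of the guarded start/stop shape, with illustrative names throughout:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t sched_scan_mtx = PTHREAD_MUTEX_INITIALIZER;
    static bool sched_scan_req;  /* stands in for rdev->sched_scan_req */

    static int start_sched_scan(void)
    {
            int err = 0;

            pthread_mutex_lock(&sched_scan_mtx);
            if (sched_scan_req)
                    err = -EINPROGRESS;  /* only one scheduled scan at a time */
            else
                    sched_scan_req = true;
            pthread_mutex_unlock(&sched_scan_mtx);
            return err;
    }

    static void stop_sched_scan(void)
    {
            pthread_mutex_lock(&sched_scan_mtx);
            sched_scan_req = false;  /* check and clear share one critical section */
            pthread_mutex_unlock(&sched_scan_mtx);
    }

    int main(void)  /* build with -pthread */
    {
            printf("start:       %d\n", start_sched_scan());  /* 0 */
            printf("start again: %d\n", start_sched_scan());  /* -EINPROGRESS */
            stop_sched_scan();
            printf("restart:     %d\n", start_sched_scan());  /* 0 */
            return 0;
    }
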
index d70f85eb7864f4e35326f41d63c49cc163296b05..9414b9c5b1e4284b9ed90e9fe3ff39cdfc07e2ce 100644
@@ -1345,6 +1345,8 @@ out:
                        xfrm_state_check_expire(x1);
 
                err = 0;
+               x->km.state = XFRM_STATE_DEAD;
+               __xfrm_state_put(x);
        }
        spin_unlock_bh(&x1->lock);
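
On the success path above, the incoming state x had been fully copied into the existing entry x1 but was never inserted or released, leaking the state; the fix marks it dead and drops the caller's reference. A compact userspace sketch of the balanced-reference rule, simplified to a plain counter where the kernel uses atomics:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refcnt; };

    static void obj_put(struct obj *o)
    {
            if (--o->refcnt == 0) {  /* last reference frees the object */
                    puts("freed");
                    free(o);
            }
    }

    int main(void)
    {
            struct obj *x = malloc(sizeof(*x));

            if (x == NULL)
                    return 1;
            x->refcnt = 1;  /* the caller's reference */

            /* ... copy x's payload into the long-lived entry ... */

            obj_put(x);     /* without this put, x is leaked forever */
            return 0;
    }
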
 
index 3b029cba2bafb8c5c8d9d77d8e5c978d257f7f49..a272356859497fec41796a37e750c4dfc891827d 100755
@@ -21,13 +21,15 @@ fi
 # older versions of depmod require the version string to start with three
 # numbers, so we cheat with a symlink here
 depmod_hack_needed=true
-mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
-if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
-       if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
-               -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX)
+mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE"
+if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then
+       if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \
+               -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then
                depmod_hack_needed=false
        fi
 fi
+rm -rf "$tmp_dir"
 if $depmod_hack_needed; then
        symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
        ln -s "$KERNELRELEASE" "$symlink"
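
The depmod script change above swaps a fixed .tmp_depmod scratch directory for one created with mktemp -d, so concurrent or interrupted runs can no longer collide on, or inherit, a shared path, and the directory is explicitly removed once the capability probe is done. For reference, the C-library call that behaves the same way as `mktemp -d` (a hedged sketch; the script itself of course stays in shell):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* mkdtemp() rewrites the XXXXXX suffix and creates the directory
             * atomically with mode 0700, just like `mktemp -d`. */
            char template[] = "/tmp/depmod.XXXXXX";
            char *dir = mkdtemp(template);

            if (dir == NULL) {
                    perror("mkdtemp");
                    return 1;
            }
            printf("scratch dir: %s\n", dir);
            /* ... use it, then remove it, as the script does with rm -rf ... */
            return 0;
    }
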
index c2fc0356c2a44daac0c9a3fffd96e0119cd4ba3e..83014a7c2e142ccf0a63cafa3c38519b4f2fd5c8 100644
@@ -1190,7 +1190,6 @@ SND_SOC_DAPM_INPUT("DMIC1DAT"),
 SND_SOC_DAPM_INPUT("DMIC2DAT"),
 SND_SOC_DAPM_INPUT("Clock"),
 
-SND_SOC_DAPM_MICBIAS("MICBIAS", WM8994_MICBIAS, 2, 0),
 SND_SOC_DAPM_SUPPLY_S("MICBIAS Supply", 1, SND_SOC_NOPM, 0, 0, micbias_ev,
                      SND_SOC_DAPM_PRE_PMU),
 
@@ -1509,8 +1508,10 @@ static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
        { "AIF2DACDAT", NULL, "AIF1DACDAT" },
        { "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
        { "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
-       { "MICBIAS", NULL, "CLK_SYS" },
-       { "MICBIAS", NULL, "MICBIAS Supply" },
+       { "MICBIAS1", NULL, "CLK_SYS" },
+       { "MICBIAS1", NULL, "MICBIAS Supply" },
+       { "MICBIAS2", NULL, "CLK_SYS" },
+       { "MICBIAS2", NULL, "MICBIAS Supply" },
 };
 
 static const struct snd_soc_dapm_route wm8994_intercon[] = {
@@ -2763,7 +2764,7 @@ static void wm8958_default_micdet(u16 status, void *data)
        report = SND_JACK_MICROPHONE;
 
        /* Everything else is buttons; just assign slots */
-       if (status & 0x1c0)
+       if (status & 0x1c)
                report |= SND_JACK_BTN_0;
 
 done:
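
The mask fix above is a one-nibble slip: 0x1c0 selects bits 6..8, while the button status evidently lives in bits 2..4 as the corrected constant 0x1c implies (the layout is inferred from the diff itself, not from the datasheet), so BTN_0 presses were silently dropped. Two lines make the difference visible:

    #include <stdio.h>

    int main(void)
    {
            unsigned int status = 0x04;  /* a pressed button inside bits 2..4 */

            printf("old mask 0x1c0 -> %s\n", (status & 0x1c0) ? "BTN_0" : "missed");
            printf("new mask 0x1c  -> %s\n", (status & 0x1c)  ? "BTN_0" : "missed");
            return 0;
    }
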
index d6f4703b3c0796fd5869f656904d232818e86298..770a71a15366252066a773357028e862abe9101d 100644
@@ -97,7 +97,7 @@ static int fsi_ak4642_remove(struct platform_device *pdev)
 
 static struct fsi_ak4642_data fsi_a_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSIA (AK4642)",
+       .card           = "FSIA-AK4642",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi.0",
@@ -106,7 +106,7 @@ static struct fsi_ak4642_data fsi_a_ak4642 = {
 
 static struct fsi_ak4642_data fsi_b_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSIB (AK4642)",
+       .card           = "FSIB-AK4642",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi.0",
@@ -115,7 +115,7 @@ static struct fsi_ak4642_data fsi_b_ak4642 = {
 
 static struct fsi_ak4642_data fsi_a_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSIA (AK4643)",
+       .card           = "FSIA-AK4643",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi.0",
@@ -124,7 +124,7 @@ static struct fsi_ak4642_data fsi_a_ak4643 = {
 
 static struct fsi_ak4642_data fsi_b_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSIB (AK4643)",
+       .card           = "FSIB-AK4643",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi.0",
@@ -133,7 +133,7 @@ static struct fsi_ak4642_data fsi_b_ak4643 = {
 
 static struct fsi_ak4642_data fsi2_a_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSI2A (AK4642)",
+       .card           = "FSI2A-AK4642",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi2",
@@ -142,7 +142,7 @@ static struct fsi_ak4642_data fsi2_a_ak4642 = {
 
 static struct fsi_ak4642_data fsi2_b_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSI2B (AK4642)",
+       .card           = "FSI2B-AK4642",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi2",
@@ -151,7 +151,7 @@ static struct fsi_ak4642_data fsi2_b_ak4642 = {
 
 static struct fsi_ak4642_data fsi2_a_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSI2A (AK4643)",
+       .card           = "FSI2A-AK4643",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi2",
@@ -160,7 +160,7 @@ static struct fsi_ak4642_data fsi2_a_ak4643 = {
 
 static struct fsi_ak4642_data fsi2_b_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSI2B (AK4643)",
+       .card           = "FSI2B-AK4643",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi2",
index dbafd7ac559066c6b8db9f82fe55ea23f002f3b4..59553fd8c2fb9173a72b7da8b9fa5f70942fe8c2 100644
@@ -42,7 +42,7 @@ static struct snd_soc_dai_link fsi_da7210_dai = {
 };
 
 static struct snd_soc_card fsi_soc_card = {
-       .name           = "FSI (DA7210)",
+       .name           = "FSI-DA7210",
        .dai_link       = &fsi_da7210_dai,
        .num_links      = 1,
 };
index 9719985eb82d2b4c3169be670cd7138abf41047c..d3d9fd880680e346e9558e4936a8bc4e84b018e6 100644
@@ -83,13 +83,13 @@ static int fsi_hdmi_remove(struct platform_device *pdev)
 
 static struct fsi_hdmi_data fsi2_a_hdmi = {
        .cpu_dai        = "fsia-dai",
-       .card           = "FSI2A (SH MOBILE HDMI)",
+       .card           = "FSI2A-HDMI",
        .id             = FSI_PORT_A,
 };
 
 static struct fsi_hdmi_data fsi2_b_hdmi = {
        .cpu_dai        = "fsib-dai",
-       .card           = "FSI2B (SH MOBILE HDMI)",
+       .card           = "FSI2B-HDMI",
        .id             = FSI_PORT_B,
 };
 