Merge branch 'pm-sleep' into pm-for-linus
authorRafael J. Wysocki <rjw@sisk.pl>
Sun, 25 Dec 2011 22:42:20 +0000 (23:42 +0100)
committerRafael J. Wysocki <rjw@sisk.pl>
Sun, 25 Dec 2011 22:42:20 +0000 (23:42 +0100)
* pm-sleep: (51 commits)
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  PM / Sleep: Recommend [un]lock_system_sleep() over using pm_mutex directly
  PM / Sleep: Replace mutex_[un]lock(&pm_mutex) with [un]lock_system_sleep()
  PM / Sleep: Make [un]lock_system_sleep() generic
  PM / Sleep: Use the freezer_count() functions in [un]lock_system_sleep() APIs
  PM / Freezer: Remove the "userspace only" constraint from freezer[_do_not]_count()
  PM / Hibernate: Replace unintuitive 'if' condition in kernel/power/user.c with 'else'
  Freezer / sunrpc / NFS: don't allow TASK_KILLABLE sleeps to block the freezer
  PM / Sleep: Unify diagnostic messages from device suspend/resume
  ACPI / PM: Do not save/restore NVS on Asus K54C/K54HR
  PM / Hibernate: Remove deprecated hibernation test modes
  PM / Hibernate: Thaw processes in SNAPSHOT_CREATE_IMAGE ioctl test path
  ...

Conflicts:
kernel/kmod.c

76 files changed:
Makefile
arch/sparc/kernel/pci_sun4v.c
arch/x86/net/bpf_jit_comp.c
drivers/ata/Kconfig
drivers/devfreq/Kconfig
drivers/devfreq/Makefile
drivers/devfreq/devfreq.c
drivers/devfreq/exynos4_bus.c [new file with mode: 0644]
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/md/bitmap.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/omap3isp/ispstat.c
drivers/mfd/ab5500-debugfs.c
drivers/mfd/ab8500-core.c
drivers/mfd/adp5520.c
drivers/mfd/da903x.c
drivers/mfd/jz4740-adc.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm8994-core.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/usb/asix.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/epautoconf.c
drivers/usb/host/isp1760-if.c
drivers/usb/musb/musb_host.c
firmware/README.AddingFirmware
fs/btrfs/async-thread.c
fs/btrfs/inode.c
fs/fs-writeback.c
include/linux/lglock.h
include/net/dst.h
include/net/flow.h
include/net/sctp/structs.h
include/net/sock.h
include/trace/events/writeback.h
kernel/cpu.c
kernel/kmod.c
mm/filemap.c
net/bluetooth/hci_conn.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/core.c
net/bridge/br_netfilter.c
net/core/flow.c
net/core/net-sysfs.c
net/core/sock.c
net/ipv4/ipconfig.c
net/ipv4/route.c
net/ipv6/ip6_output.c
net/llc/af_llc.c
net/netfilter/xt_connbytes.c
net/nfc/nci/core.c
net/packet/af_packet.c
net/sched/sch_mqprio.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/xfrm/xfrm_policy.c
scripts/kconfig/Makefile
sound/atmel/ac97c.c

index a43733df39788a122f645804e4a343882800f352..ea51081812f38d5ee8dfaeaab060a9fb4a86ba67 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index b272cda35a0125904bc6f17eb1c5d14054dec24e..af5755d20fbe91eb43b682e99aa8f836bfafa6ad 100644 (file)
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
        if (!irq)
                return -ENOMEM;
 
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-               return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
 
        return irq;
 }
index bfab3fa10edc63e50b4184d00e75566678242a0d..7b65f752c5f8fd79af2c6b4afb342988bdd8d56c 100644 (file)
@@ -568,8 +568,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
index 6bdedd7cca2cd3cd630370732b71b8b530992ca9..cf047c406d92797777d6255122f23b5a8297c58a 100644 (file)
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF
+       depends on PATA_PLATFORM && OF && OF_IRQ
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index 8f0491037080df38856b76ec73db77cf18d3320f..464fa2147dfb47c9be87bafd6fb17a96858d658d 100644 (file)
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
 
 comment "DEVFREQ Drivers"
 
+config ARM_EXYNOS4_BUS_DEVFREQ
+       bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+       depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+       select ARCH_HAS_OPP
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
+       help
+         This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+         and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+         It reads PPMU counters of memory controllers and adjusts
+         the operating frequencies and voltages with OPP support.
+         To operate with optimal voltages, ASV support is required
+         (CONFIG_EXYNOS_ASV).
+
 endif # PM_DEVFREQ
index 4564a89e970ab9fe165e639f913efa79fe903169..8c464234f7e729155c291d9ad7f15b1883655623 100644 (file)
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)       += governor_simpleondemand.o
 obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)  += governor_performance.o
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)    += governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)    += governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)  += exynos4_bus.o
index 59d24e9cb8c512a949a24803c5bf11a143fc649d..c189b82f5ececeddc29f97e62bc948ffb3f31961 100644 (file)
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
                if (!IS_ERR(devfreq)) {
                        dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
                        err = -EINVAL;
-                       goto out;
+                       goto err_out;
                }
        }
 
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
                dev_err(dev, "%s: Unable to create devfreq for the device\n",
                        __func__);
                err = -ENOMEM;
-               goto out;
+               goto err_out;
        }
 
        mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
                                   devfreq->next_polling);
        }
        mutex_unlock(&devfreq_list_lock);
-       goto out;
+out:
+       return devfreq;
+
 err_init:
        device_unregister(&devfreq->dev);
 err_dev:
        mutex_unlock(&devfreq->lock);
        kfree(devfreq);
-out:
-       if (err)
-               return ERR_PTR(err);
-       else
-               return devfreq;
+err_out:
+       return ERR_PTR(err);
 }
 
 /**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644 (file)
index 0000000..6460577
--- /dev/null
@@ -0,0 +1,1135 @@
+/* drivers/devfreq/exynos4210_memorybus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *     MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ *     This version supports EXYNOS4210 only. This changes bus frequencies
+ *     and vddint voltages. Exynos4412/4212 should be able to be supported
+ *     with minor modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+#ifdef CONFIG_EXYNOS_ASV
+extern unsigned int exynos_result_of_asv;
+#endif
+
+#include <mach/regs-clock.h>
+
+#include <plat/map-s5p.h>
+
+#define MAX_SAFEVOLT   1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+       TYPE_BUSF_EXYNOS4210,
+       TYPE_BUSF_EXYNOS4x12,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% */
+#define BUS_SATURATION_RATIO   40
+
+enum ppmu_counter {
+       PPMU_PMNCNT0 = 0,
+       PPMU_PMCCNT1,
+       PPMU_PMNCNT2,
+       PPMU_PMNCNT3,
+       PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu {
+       void __iomem *hw_base;
+       unsigned int ccnt;
+       unsigned int event;
+       unsigned int count[PPMU_PMNCNT_MAX];
+       bool ccnt_overflow;
+       bool count_overflow[PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+       LV_0 = 0,
+       LV_1,
+       LV_2,
+       LV_3,
+       LV_4,
+       _LV_END
+};
+#define EX4210_LV_MAX  LV_2
+#define EX4x12_LV_MAX  LV_4
+#define EX4210_LV_NUM  (LV_2 + 1)
+#define EX4x12_LV_NUM  (LV_4 + 1)
+
+struct busfreq_data {
+       enum exynos4_busf_type type;
+       struct device *dev;
+       struct devfreq *devfreq;
+       bool disabled;
+       struct regulator *vdd_int;
+       struct regulator *vdd_mif; /* Exynos4412/4212 only */
+       struct opp *curr_opp;
+       struct exynos4_ppmu dmc[2];
+
+       struct notifier_block pm_notifier;
+       struct mutex lock;
+
+       /* Dividers calculated at boot/probe-time */
+       unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+       unsigned int top_divtable[_LV_END];
+};
+
+struct bus_opp_table {
+       unsigned int idx;
+       unsigned long clk;
+       unsigned long volt;
+};
+
+/* 4210 controls clock of mif and voltage of int */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+       {LV_0, 400000, 1150000},
+       {LV_1, 267000, 1050000},
+       {LV_2, 133000, 1025000},
+       {0, 0, 0},
+};
+
+/*
+ * MIF is the main control knob clock for exynos4x12 MIF/INT;
+ * clock and voltage of both mif/int are controlled.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+       {LV_0, 400000, 1100000},
+       {LV_1, 267000, 1000000},
+       {LV_2, 160000, 950000},
+       {LV_3, 133000, 950000},
+       {LV_4, 100000, 950000},
+       {0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12. LV_x is not meant to represent
+ * the current performance. (MIF does)
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+       {LV_0, 200000, 1000000},
+       {LV_1, 160000, 950000},
+       {LV_2, 133000, 925000},
+       {LV_3, 100000, 900000},
+       {0, 0, 0},
+};
+
+/* TODO: asv volt definitions are "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+       {1150000, 1050000, 1050000},
+       {1125000, 1025000, 1025000},
+       {1100000, 1000000, 1000000},
+       {1075000, 975000, 975000},
+       {1050000, 950000, 950000},
+};
+
+static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
+       /* 400      267     160     133     100 */
+       {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
+       {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
+       {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+       {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
+       {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
+       {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
+       {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
+       {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
+       {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
+       /* 200    160      133     100 */
+       {1000000, 950000, 925000, 900000}, /* ASV0 */
+       {975000,  925000, 925000, 900000}, /* ASV1 */
+       {950000,  925000, 900000, 875000}, /* ASV2 */
+       {950000,  900000, 900000, 875000}, /* ASV3 */
+       {925000,  875000, 875000, 875000}, /* ASV4 */
+       {900000,  850000, 850000, 850000}, /* ASV5 */
+       {900000,  850000, 850000, 850000}, /* ASV6 */
+       {900000,  850000, 850000, 850000}, /* ASV7 */
+       {900000,  850000, 850000, 850000}, /* ASV8 */
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+       /*
+        * Clock divider value for following
+        * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+        *              DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+        */
+
+       /* DMC L0: 400MHz */
+       { 3, 1, 1, 1, 1, 1, 3, 1 },
+       /* DMC L1: 266.7MHz */
+       { 4, 1, 1, 2, 1, 1, 3, 1 },
+       /* DMC L2: 133MHz */
+       { 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+       /*
+        * Clock divider value for following
+        * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+        */
+       /* ACLK200 L0: 200MHz */
+       { 3, 7, 4, 5, 1 },
+       /* ACLK200 L1: 160MHz */
+       { 4, 7, 5, 6, 1 },
+       /* ACLK200 L2: 133MHz */
+       { 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+       /*
+        * Clock divider value for following
+        * { DIVGDL/R, DIVGPL/R }
+        */
+       /* ACLK_GDL/R L1: 200MHz */
+       { 3, 1 },
+       /* ACLK_GDL/R L2: 160MHz */
+       { 4, 1 },
+       /* ACLK_GDL/R L3: 133MHz */
+       { 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+       /*
+        * Clock divider value for following
+        * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+        *              DIVDMCP}
+        */
+
+       /* DMC L0: 400MHz */
+       {3, 1, 1, 1, 1, 1},
+       /* DMC L1: 266.7MHz */
+       {4, 1, 1, 2, 1, 1},
+       /* DMC L2: 160MHz */
+       {5, 1, 1, 4, 1, 1},
+       /* DMC L3: 133MHz */
+       {5, 1, 1, 5, 1, 1},
+       /* DMC L4: 100MHz */
+       {7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
+       /*
+        * Clock divider value for following
+        * { G2DACP, DIVC2C, DIVC2C_ACLK }
+        */
+
+       /* DMC L0: 400MHz */
+       {3, 1, 1},
+       /* DMC L1: 266.7MHz */
+       {4, 2, 1},
+       /* DMC L2: 160MHz */
+       {5, 4, 1},
+       /* DMC L3: 133MHz */
+       {5, 5, 1},
+       /* DMC L4: 100MHz */
+       {7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+       /*
+        * Clock divider value for following
+        * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+               DIVACLK133, DIVONENAND }
+        */
+
+       /* ACLK_GDL/R L0: 200MHz */
+       {2, 7, 4, 5, 1},
+       /* ACLK_GDL/R L1: 200MHz */
+       {2, 7, 4, 5, 1},
+       /* ACLK_GDL/R L2: 160MHz */
+       {4, 7, 5, 7, 1},
+       /* ACLK_GDL/R L3: 133MHz */
+       {4, 7, 5, 7, 1},
+       /* ACLK_GDL/R L4: 100MHz */
+       {7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
+       /*
+        * Clock divider value for following
+        * { DIVGDL/R, DIVGPL/R }
+        */
+
+       /* ACLK_GDL/R L0: 200MHz */
+       {3, 1},
+       /* ACLK_GDL/R L1: 200MHz */
+       {3, 1},
+       /* ACLK_GDL/R L2: 160MHz */
+       {4, 1},
+       /* ACLK_GDL/R L3: 133MHz */
+       {5, 1},
+       /* ACLK_GDL/R L4: 100MHz */
+       {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+       /*
+        * Clock divider value for following
+        * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+        */
+
+       /* SCLK_MFC: 200MHz */
+       {3, 3, 4},
+       /* SCLK_MFC: 200MHz */
+       {3, 3, 4},
+       /* SCLK_MFC: 160MHz */
+       {4, 4, 5},
+       /* SCLK_MFC: 133MHz */
+       {5, 5, 5},
+       /* SCLK_MFC: 100MHz */
+       {7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+       unsigned int index;
+       unsigned int tmp;
+
+       for (index = LV_0; index < EX4210_LV_NUM; index++)
+               if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+                       break;
+
+       if (index == EX4210_LV_NUM)
+               return -EINVAL;
+
+       /* Change Divider - DMC0 */
+       tmp = data->dmc_divtable[index];
+
+       __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+       } while (tmp & 0x11111111);
+
+       /* Change Divider - TOP */
+       tmp = data->top_divtable[index];
+
+       __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+       } while (tmp & 0x11111);
+
+       /* Change Divider - LEFTBUS */
+       tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+       tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+       tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+                               S5P_CLKDIV_BUS_GDLR_SHIFT) |
+               (exynos4210_clkdiv_lr_bus[index][1] <<
+                               S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+       } while (tmp & 0x11);
+
+       /* Change Divider - RIGHTBUS */
+       tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+       tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+       tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+                               S5P_CLKDIV_BUS_GDLR_SHIFT) |
+               (exynos4210_clkdiv_lr_bus[index][1] <<
+                               S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+       } while (tmp & 0x11);
+
+       return 0;
+}
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+       unsigned int index;
+       unsigned int tmp;
+
+       for (index = LV_0; index < EX4x12_LV_NUM; index++)
+               if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+                       break;
+
+       if (index == EX4x12_LV_NUM)
+               return -EINVAL;
+
+       /* Change Divider - DMC0 */
+       tmp = data->dmc_divtable[index];
+
+       __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+       } while (tmp & 0x11111111);
+
+       /* Change Divider - DMC1 */
+       tmp = __raw_readl(S5P_CLKDIV_DMC1);
+
+       tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
+               S5P_CLKDIV_DMC1_C2C_MASK |
+               S5P_CLKDIV_DMC1_C2CACLK_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+                               S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+               (exynos4x12_clkdiv_dmc1[index][1] <<
+                               S5P_CLKDIV_DMC1_C2C_SHIFT) |
+               (exynos4x12_clkdiv_dmc1[index][2] <<
+                               S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_DMC1);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+       } while (tmp & 0x111111);
+
+       /* Change Divider - TOP */
+       tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+       tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
+               S5P_CLKDIV_TOP_ACLK100_MASK |
+               S5P_CLKDIV_TOP_ACLK160_MASK |
+               S5P_CLKDIV_TOP_ACLK133_MASK |
+               S5P_CLKDIV_TOP_ONENAND_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+                               S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+               (exynos4x12_clkdiv_top[index][1] <<
+                               S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+               (exynos4x12_clkdiv_top[index][2] <<
+                               S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+               (exynos4x12_clkdiv_top[index][3] <<
+                               S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+               (exynos4x12_clkdiv_top[index][4] <<
+                               S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+       } while (tmp & 0x11111);
+
+       /* Change Divider - LEFTBUS */
+       tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+       tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+                               S5P_CLKDIV_BUS_GDLR_SHIFT) |
+               (exynos4x12_clkdiv_lr_bus[index][1] <<
+                               S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+       } while (tmp & 0x11);
+
+       /* Change Divider - RIGHTBUS */
+       tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+       tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+                               S5P_CLKDIV_BUS_GDLR_SHIFT) |
+               (exynos4x12_clkdiv_lr_bus[index][1] <<
+                               S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+       } while (tmp & 0x11);
+
+       /* Change Divider - MFC */
+       tmp = __raw_readl(S5P_CLKDIV_MFC);
+
+       tmp &= ~(S5P_CLKDIV_MFC_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+                               S5P_CLKDIV_MFC_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_MFC);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+       } while (tmp & 0x1);
+
+       /* Change Divider - JPEG */
+       tmp = __raw_readl(S5P_CLKDIV_CAM1);
+
+       tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+                               S5P_CLKDIV_CAM1_JPEG_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_CAM1);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+       } while (tmp & 0x1);
+
+       /* Change Divider - FIMC0~3 */
+       tmp = __raw_readl(S5P_CLKDIV_CAM);
+
+       tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
+               S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+
+       tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+                               S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+               (exynos4x12_clkdiv_sclkip[index][2] <<
+                               S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+               (exynos4x12_clkdiv_sclkip[index][2] <<
+                               S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+               (exynos4x12_clkdiv_sclkip[index][2] <<
+                               S5P_CLKDIV_CAM_FIMC3_SHIFT));
+
+       __raw_writel(tmp, S5P_CLKDIV_CAM);
+
+       do {
+               tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+       } while (tmp & 0x1111);
+
+       return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+       unsigned int i;
+
+       for (i = 0; i < 2; i++) {
+               void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+               /* Reset PPMU */
+               __raw_writel(0x8000000f, ppmu_base + 0xf010);
+               __raw_writel(0x8000000f, ppmu_base + 0xf050);
+               __raw_writel(0x6, ppmu_base + 0xf000);
+               __raw_writel(0x0, ppmu_base + 0xf100);
+
+               /* Set PPMU Event */
+               data->dmc[i].event = 0x6;
+               __raw_writel(((data->dmc[i].event << 12) | 0x1),
+                            ppmu_base + 0xfc);
+
+               /* Start PPMU */
+               __raw_writel(0x1, ppmu_base + 0xf000);
+       }
+}
+
+static void exynos4_read_ppmu(struct busfreq_data *data)
+{
+       int i, j;
+
+       for (i = 0; i < 2; i++) {
+               void __iomem *ppmu_base = data->dmc[i].hw_base;
+               u32 overflow;
+
+               /* Stop PPMU */
+               __raw_writel(0x0, ppmu_base + 0xf000);
+
+               /* Update local data from PPMU */
+               overflow = __raw_readl(ppmu_base + 0xf050);
+
+               data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+               data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+               for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+                       data->dmc[i].count[j] = __raw_readl(
+                                       ppmu_base + (0xf110 + (0x10 * j)));
+                       data->dmc[i].count_overflow[j] = overflow & (1 << j);
+               }
+       }
+
+       busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+       int i = 0;
+
+       while (exynos4x12_intclk_table[i].clk) {
+               if (exynos4x12_intclk_table[i].clk <= mifclk)
+                       return i;
+               i++;
+       }
+
+       return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+                              struct opp *oldopp)
+{
+       int err = 0, tmp;
+       unsigned long volt = opp_get_voltage(opp);
+
+       switch (data->type) {
+       case TYPE_BUSF_EXYNOS4210:
+               /* OPP represents DMC clock + INT voltage */
+               err = regulator_set_voltage(data->vdd_int, volt,
+                                           MAX_SAFEVOLT);
+               break;
+       case TYPE_BUSF_EXYNOS4x12:
+               /* OPP represents MIF clock + MIF voltage */
+               err = regulator_set_voltage(data->vdd_mif, volt,
+                                           MAX_SAFEVOLT);
+               if (err)
+                       break;
+
+               tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+               if (tmp < 0) {
+                       err = tmp;
+                       regulator_set_voltage(data->vdd_mif,
+                                             opp_get_voltage(oldopp),
+                                             MAX_SAFEVOLT);
+                       break;
+               }
+               err = regulator_set_voltage(data->vdd_int,
+                                           exynos4x12_intclk_table[tmp].volt,
+                                           MAX_SAFEVOLT);
+               /*  Try to recover */
+               if (err)
+                       regulator_set_voltage(data->vdd_mif,
+                                             opp_get_voltage(oldopp),
+                                             MAX_SAFEVOLT);
+               break;
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
+{
+       int err = 0;
+       struct platform_device *pdev = container_of(dev, struct platform_device,
+                                                   dev);
+       struct busfreq_data *data = platform_get_drvdata(pdev);
+       struct opp *opp = devfreq_recommended_opp(dev, _freq);
+       unsigned long old_freq = opp_get_freq(data->curr_opp);
+       unsigned long freq = opp_get_freq(opp);
+
+       if (old_freq == freq)
+               return 0;
+
+       dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+
+       mutex_lock(&data->lock);
+
+       if (data->disabled)
+               goto out;
+
+       if (old_freq < freq)
+               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+       if (err)
+               goto out;
+
+       if (old_freq != freq) {
+               switch (data->type) {
+               case TYPE_BUSF_EXYNOS4210:
+                       err = exynos4210_set_busclk(data, opp);
+                       break;
+               case TYPE_BUSF_EXYNOS4x12:
+                       err = exynos4x12_set_busclk(data, opp);
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+       }
+       if (err)
+               goto out;
+
+       if (old_freq > freq)
+               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+       if (err)
+               goto out;
+
+       data->curr_opp = opp;
+out:
+       mutex_unlock(&data->lock);
+       return err;
+}
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+       u64 p0 = data->dmc[0].count[0];
+       u64 p1 = data->dmc[1].count[0];
+
+       p0 *= data->dmc[1].ccnt;
+       p1 *= data->dmc[0].ccnt;
+
+       if (data->dmc[1].ccnt == 0)
+               return 0;
+
+       if (p0 > p1)
+               return 0;
+       return 1;
+}
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+                                     struct devfreq_dev_status *stat)
+{
+       struct platform_device *pdev = container_of(dev, struct platform_device,
+                                                   dev);
+       struct busfreq_data *data = platform_get_drvdata(pdev);
+       int busier_dmc;
+       int cycles_x2 = 2; /* 2 x cycles */
+       void __iomem *addr;
+       u32 timing;
+       u32 memctrl;
+
+       exynos4_read_ppmu(data);
+       busier_dmc = exynos4_get_busier_dmc(data);
+       stat->current_frequency = opp_get_freq(data->curr_opp);
+
+       if (busier_dmc)
+               addr = S5P_VA_DMC1;
+       else
+               addr = S5P_VA_DMC0;
+
+       memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+       timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+       switch ((memctrl >> 8) & 0xf) {
+       case 0x4: /* DDR2 */
+               cycles_x2 = ((timing >> 16) & 0xf) * 2;
+               break;
+       case 0x5: /* LPDDR2 */
+       case 0x6: /* DDR3 */
+               cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+               break;
+       default:
+               pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+                      (memctrl >> 8) & 0xf);
+               return -EINVAL;
+       }
+
+       /* Number of cycles spent on memory access */
+       stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+       stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+       stat->total_time = data->dmc[busier_dmc].ccnt;
+
+       /* If the counters have overflown, retry */
+       if (data->dmc[busier_dmc].ccnt_overflow ||
+           data->dmc[busier_dmc].count_overflow[0])
+               return -EAGAIN;
+
+       return 0;
+}
+
+/*
+ * devfreq ->exit callback: undo the OPP-change notifier registration done
+ * in probe so devfreq_remove_device() can tear the device down cleanly.
+ */
+static void exynos4_bus_exit(struct device *dev)
+{
+       struct platform_device *pdev = container_of(dev, struct platform_device,
+                                                   dev);
+       struct busfreq_data *data = platform_get_drvdata(pdev);
+
+       devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+/*
+ * devfreq profile shared by 4210 and 4x12: poll load every 50 ms and start
+ * from 400000 (same frequency unit as the OPP tables registered in
+ * *_init_tables() — kHz here, judging by the clk columns; confirm upstream).
+ */
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+       .initial_freq   = 400000,
+       .polling_ms     = 50,
+       .target         = exynos4_bus_target,
+       .get_dev_status = exynos4_bus_get_dev_status,
+       .exit           = exynos4_bus_exit,
+};
+
+/*
+ * exynos4210_init_tables - precompute divider words and register OPPs (4210).
+ *
+ * For every bus level this caches the CLKDIV_DMC0 and CLKDIV_TOP register
+ * values in @data so the DVFS hot path only has to write them, then picks a
+ * voltage column by ASV group (collapsed into five merged groups) and adds
+ * one OPP per level.
+ *
+ * Returns 0 on success or the first opp_add() error code.
+ */
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+       u32 tmp;
+       int mgrp;
+       int i, err = 0;
+
+       /* Start from the live register so bits outside the masked divider
+        * fields keep their current hardware values in every cached word. */
+       tmp = __raw_readl(S5P_CLKDIV_DMC0);
+       for (i = LV_0; i < EX4210_LV_NUM; i++) {
+               tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+                       S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+                       S5P_CLKDIV_DMC0_DPHY_MASK |
+                       S5P_CLKDIV_DMC0_DMC_MASK |
+                       S5P_CLKDIV_DMC0_DMCD_MASK |
+                       S5P_CLKDIV_DMC0_DMCP_MASK |
+                       S5P_CLKDIV_DMC0_COPY2_MASK |
+                       S5P_CLKDIV_DMC0_CORETI_MASK);
+
+               tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+                                       S5P_CLKDIV_DMC0_ACP_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][1] <<
+                                       S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][2] <<
+                                       S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][3] <<
+                                       S5P_CLKDIV_DMC0_DMC_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][4] <<
+                                       S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][5] <<
+                                       S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][6] <<
+                                       S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+                       (exynos4210_clkdiv_dmc0[i][7] <<
+                                       S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+               data->dmc_divtable[i] = tmp;
+       }
+
+       /* Same pattern for the TOP bus dividers. */
+       tmp = __raw_readl(S5P_CLKDIV_TOP);
+       for (i = LV_0; i <  EX4210_LV_NUM; i++) {
+               tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
+                       S5P_CLKDIV_TOP_ACLK100_MASK |
+                       S5P_CLKDIV_TOP_ACLK160_MASK |
+                       S5P_CLKDIV_TOP_ACLK133_MASK |
+                       S5P_CLKDIV_TOP_ONENAND_MASK);
+
+               tmp |= ((exynos4210_clkdiv_top[i][0] <<
+                                       S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+                       (exynos4210_clkdiv_top[i][1] <<
+                                       S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+                       (exynos4210_clkdiv_top[i][2] <<
+                                       S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+                       (exynos4210_clkdiv_top[i][3] <<
+                                       S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+                       (exynos4210_clkdiv_top[i][4] <<
+                                       S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+               data->top_divtable[i] = tmp;
+       }
+
+#ifdef CONFIG_EXYNOS_ASV
+       tmp = exynos4_result_of_asv;
+#else
+       tmp = 0; /* Max voltages for the reliability of the unknown */
+#endif
+
+       pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+       /* Use merged grouping for voltage */
+       switch (tmp) {
+       case 0:
+               mgrp = 0;
+               break;
+       case 1:
+       case 2:
+               mgrp = 1;
+               break;
+       case 3:
+       case 4:
+               mgrp = 2;
+               break;
+       case 5:
+       case 6:
+               mgrp = 3;
+               break;
+       case 7:
+               mgrp = 4;
+               break;
+       default:
+               /* Out-of-range ASV group: fall back to group 0 (max voltage). */
+               pr_warn("Unknown ASV Group. Use max voltage.\n");
+               mgrp = 0;
+       }
+
+       /* Fill the voltage column selected by the merged ASV group. */
+       for (i = LV_0; i < EX4210_LV_NUM; i++)
+               exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+       /* Register one OPP per bus level; abort on the first failure. */
+       for (i = LV_0; i < EX4210_LV_NUM; i++) {
+               err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+                             exynos4210_busclk_table[i].volt);
+               if (err) {
+                       dev_err(data->dev, "Cannot add opp entries.\n");
+                       return err;
+               }
+       }
+
+
+       return 0;
+}
+
+/*
+ * exynos4x12_init_tables - precompute divider words and register OPPs (4x12).
+ *
+ * Enables the DREX2 pause feature needed for DMC DVFS, caches a CLKDIV_DMC0
+ * value per level, fills the MIF/INT voltage tables from the ASV group and
+ * registers the MIF clock levels as OPPs.
+ *
+ * Returns 0 on success or the first opp_add() error code.
+ */
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+       unsigned int i;
+       unsigned int tmp;
+       int ret;
+
+       /* Enable pause function for DREX2 DVFS */
+       tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
+       tmp |= DMC_PAUSE_ENABLE;
+       __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+
+       /* Start from the live register so unmasked bits are preserved. */
+       tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+       for (i = 0; i <  EX4x12_LV_NUM; i++) {
+               tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+                       S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+                       S5P_CLKDIV_DMC0_DPHY_MASK |
+                       S5P_CLKDIV_DMC0_DMC_MASK |
+                       S5P_CLKDIV_DMC0_DMCD_MASK |
+                       S5P_CLKDIV_DMC0_DMCP_MASK);
+
+               tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+                                       S5P_CLKDIV_DMC0_ACP_SHIFT) |
+                       (exynos4x12_clkdiv_dmc0[i][1] <<
+                                       S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+                       (exynos4x12_clkdiv_dmc0[i][2] <<
+                                       S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+                       (exynos4x12_clkdiv_dmc0[i][3] <<
+                                       S5P_CLKDIV_DMC0_DMC_SHIFT) |
+                       (exynos4x12_clkdiv_dmc0[i][4] <<
+                                       S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+                       (exynos4x12_clkdiv_dmc0[i][5] <<
+                                       S5P_CLKDIV_DMC0_DMCP_SHIFT));
+
+               data->dmc_divtable[i] = tmp;
+       }
+
+#ifdef CONFIG_EXYNOS_ASV
+       tmp = exynos4_result_of_asv;
+#else
+       tmp = 0; /* Max voltages for the reliability of the unknown */
+#endif
+
+       /* Clamp out-of-range ASV groups to group 0 (max voltage). */
+       if (tmp > 8)
+               tmp = 0;
+       pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
+
+       /* Pick the MIF/INT voltage columns for the selected ASV group. */
+       for (i = 0; i < EX4x12_LV_NUM; i++) {
+               exynos4x12_mifclk_table[i].volt =
+                       exynos4x12_mif_step_50[tmp][i];
+               exynos4x12_intclk_table[i].volt =
+                       exynos4x12_int_volt[tmp][i];
+       }
+
+       /* Register one OPP per MIF clock level; abort on the first failure. */
+       for (i = 0; i < EX4x12_LV_NUM; i++) {
+               ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+                             exynos4x12_mifclk_table[i].volt);
+               if (ret) {
+                       dev_err(data->dev, "Fail to add opp entries.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * PM notifier: before suspend, pin the bus to its fastest OPP (voltage first,
+ * then clocks) and set ->disabled so DVFS stays off; after resume/restore,
+ * clear ->disabled to reactivate DVFS.  Both paths run under ->lock.
+ */
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+               unsigned long event, void *ptr)
+{
+       struct busfreq_data *data = container_of(this, struct busfreq_data,
+                                                pm_notifier);
+       struct opp *opp;
+       unsigned long maxfreq = ULONG_MAX;
+       int err = 0;
+
+       switch (event) {
+       case PM_SUSPEND_PREPARE:
+               /* Set Fastest and Deactivate DVFS */
+               mutex_lock(&data->lock);
+
+               data->disabled = true;
+
+               /* Floor of ULONG_MAX == highest registered OPP.
+                * NOTE(review): result is not IS_ERR-checked here, unlike in
+                * probe — confirm the lookup cannot fail at this point. */
+               opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+               /* Raise voltage before switching to the faster clocks. */
+               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               if (err)
+                       goto unlock;
+
+               switch (data->type) {
+               case TYPE_BUSF_EXYNOS4210:
+                       err = exynos4210_set_busclk(data, opp);
+                       break;
+               case TYPE_BUSF_EXYNOS4x12:
+                       err = exynos4x12_set_busclk(data, opp);
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+               if (err)
+                       goto unlock;
+
+               data->curr_opp = opp;
+unlock:
+               mutex_unlock(&data->lock);
+               if (err)
+                       return err;
+               return NOTIFY_OK;
+       case PM_POST_RESTORE:
+       case PM_POST_SUSPEND:
+               /* Reactivate */
+               mutex_lock(&data->lock);
+               data->disabled = false;
+               mutex_unlock(&data->lock);
+               return NOTIFY_OK;
+       }
+
+       /* Events we do not care about. */
+       return NOTIFY_DONE;
+}
+
+/*
+ * Probe: allocate per-device state, build the variant-specific divider/OPP
+ * tables, grab the regulators (vdd_mif only exists on 4x12), look up the
+ * initial OPP, reset the PPMU counters, register with devfreq and finally
+ * hook the PM notifier.  Errors unwind in reverse order via the goto ladder.
+ */
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+       struct busfreq_data *data;
+       struct opp *opp;
+       struct device *dev = &pdev->dev;
+       int err = 0;
+
+       /* kzalloc: all fields (incl. vdd_mif) start NULL/0, which the
+        * error paths and remove() rely on. */
+       data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+       if (data == NULL) {
+               dev_err(dev, "Cannot allocate memory.\n");
+               return -ENOMEM;
+       }
+
+       data->type = pdev->id_entry->driver_data;
+       data->dmc[0].hw_base = S5P_VA_DMC0;
+       data->dmc[1].hw_base = S5P_VA_DMC1;
+       data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+       data->dev = dev;
+       mutex_init(&data->lock);
+
+       /* Variant-specific divider tables and OPP registration. */
+       switch (data->type) {
+       case TYPE_BUSF_EXYNOS4210:
+               err = exynos4210_init_tables(data);
+               break;
+       case TYPE_BUSF_EXYNOS4x12:
+               err = exynos4x12_init_tables(data);
+               break;
+       default:
+               dev_err(dev, "Cannot determine the device id %d\n", data->type);
+               err = -EINVAL;
+       }
+       if (err)
+               goto err_regulator;
+
+       data->vdd_int = regulator_get(dev, "vdd_int");
+       if (IS_ERR(data->vdd_int)) {
+               dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+               err = PTR_ERR(data->vdd_int);
+               goto err_regulator;
+       }
+       /* Only 4x12 has a separate memory-interface rail. */
+       if (data->type == TYPE_BUSF_EXYNOS4x12) {
+               data->vdd_mif = regulator_get(dev, "vdd_mif");
+               if (IS_ERR(data->vdd_mif)) {
+                       dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+                       err = PTR_ERR(data->vdd_mif);
+                       regulator_put(data->vdd_int);
+                       goto err_regulator;
+
+               }
+       }
+
+       /* Resolve the profile's initial frequency to an actual OPP. */
+       opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+       if (IS_ERR(opp)) {
+               dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+                      exynos4_devfreq_profile.initial_freq);
+               err = PTR_ERR(opp);
+               goto err_opp_add;
+       }
+       data->curr_opp = opp;
+
+       platform_set_drvdata(pdev, data);
+
+       /* Zero the PPMU load counters before devfreq starts polling. */
+       busfreq_mon_reset(data);
+
+       data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+                                          &devfreq_simple_ondemand, NULL);
+       if (IS_ERR(data->devfreq)) {
+               err = PTR_ERR(data->devfreq);
+               goto err_opp_add;
+       }
+
+       devfreq_register_opp_notifier(dev, data->devfreq);
+
+       err = register_pm_notifier(&data->pm_notifier);
+       if (err) {
+               dev_err(dev, "Failed to setup pm notifier\n");
+               goto err_devfreq_add;
+       }
+
+       return 0;
+err_devfreq_add:
+       devfreq_remove_device(data->devfreq);
+err_opp_add:
+       /* vdd_mif is NULL on 4210 (kzalloc), so the guard is safe. */
+       if (data->vdd_mif)
+               regulator_put(data->vdd_mif);
+       regulator_put(data->vdd_int);
+err_regulator:
+       kfree(data);
+       return err;
+}
+
+/*
+ * Remove: tear down in reverse probe order — PM notifier, devfreq device,
+ * regulators (vdd_mif is NULL on 4210), then the state structure itself.
+ */
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+       struct busfreq_data *data = platform_get_drvdata(pdev);
+
+       unregister_pm_notifier(&data->pm_notifier);
+       devfreq_remove_device(data->devfreq);
+       regulator_put(data->vdd_int);
+       if (data->vdd_mif)
+               regulator_put(data->vdd_mif);
+       kfree(data);
+
+       return 0;
+}
+
+/*
+ * System resume: re-initialize the PPMU load counters, whose state is
+ * presumably lost across suspend — done before DVFS polling restarts.
+ */
+static int exynos4_busfreq_resume(struct device *dev)
+{
+       struct platform_device *pdev = container_of(dev, struct platform_device,
+                                                   dev);
+       struct busfreq_data *data = platform_get_drvdata(pdev);
+
+       busfreq_mon_reset(data);
+       return 0;
+}
+
+/* Only a resume hook is needed; suspend handling lives in the PM notifier. */
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+       .resume = exynos4_busfreq_resume,
+};
+
+/* Supported SoCs; driver_data selects the 4210 vs 4x12 code paths. */
+static const struct platform_device_id exynos4_busfreq_id[] = {
+       { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+       { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
+       { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
+       { },
+};
+
+/* Platform driver glue tying probe/remove/PM ops to the id table above. */
+static struct platform_driver exynos4_busfreq_driver = {
+       .probe  = exynos4_busfreq_probe,
+       .remove = __devexit_p(exynos4_busfreq_remove),
+       .id_table = exynos4_busfreq_id,
+       .driver = {
+               .name   = "exynos4-busfreq",
+               .owner  = THIS_MODULE,
+               .pm     = &exynos4_busfreq_pm,
+       },
+};
+
+/*
+ * late_initcall (rather than module_init), presumably so that regulators
+ * and the devfreq core are available before we probe — TODO confirm.
+ */
+static int __init exynos4_busfreq_init(void)
+{
+       return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit exynos4_busfreq_exit(void)
+{
+       platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+/* Module metadata. */
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos4-busfreq");
index 5e00d1670aa9964b8d49d0e289e9da5211e501d2..92c9628c572daa2495a685b0a7898273957ffc73 100644 (file)
@@ -3276,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
                        rdev->accel_working = false;
                }
        }
+
+       /* Don't start up if the MC ucode is missing on BTC parts.
+        * The default clocks and voltages before the MC ucode
+        * is loaded are not sufficient for advanced operations.
+        */
+       if (ASIC_IS_DCE5(rdev)) {
+               if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+                       DRM_ERROR("radeon: MC ucode required for NI+.\n");
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
index 8aa1dbb45c67efad383e6f938383efeb11589210..f94b33ae221546a9d9170f3e0eb6b8292776721b 100644 (file)
@@ -1093,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
-       u64 required_size;
        int ret;
 
        /**
@@ -1102,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * requested framebuffer.
         */
 
-       required_size = mode_cmd->pitch * mode_cmd->height;
-       if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+       if (!vmw_kms_validate_mode_vram(dev_priv,
+                                       mode_cmd->pitch,
+                                       mode_cmd->height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
index b6907118283a627a656fbe408e368da9853ff634..6d03774b176ec8236d9cfb72654bedf600f052d2 100644 (file)
@@ -1393,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                         atomic_read(&bitmap->behind_writes),
                         bitmap->mddev->bitmap_info.max_write_behind);
        }
-       if (bitmap->mddev->degraded)
-               /* Never clear bits or update events_cleared when degraded */
-               success = 0;
 
        while (sectors) {
                sector_t blocks;
@@ -1409,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
-               if (success &&
+               if (success && !bitmap->mddev->degraded &&
                    bitmap->events_cleared < bitmap->mddev->events) {
                        bitmap->events_cleared = bitmap->mddev->events;
                        bitmap->need_sync = 1;
index c3273efd08cb6dce7a19cd8d193e98ff23600bb7..627456542fb3d0d1f1d18780db24a5531b29ce32 100644 (file)
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
                return -EINVAL;
 
        rdev->raid_disk = rdev->saved_raid_disk;
+       rdev->saved_raid_disk = -1;
 
        newconf = linear_conf(mddev,mddev->raid_disks+1);
 
index ee981737edfcf3ff674749116d44d89bd45be751..f47f1f8ac44bc16677b212f35b398dae093dab87 100644 (file)
@@ -7360,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
                                        spares++;
                                        md_new_event(mddev);
                                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                               } else
-                                       break;
+                               }
                        }
                }
        }
index 31670f8d6b65789c071c1fd8d212c168138e14d8..858fdbb7eb07a24ceb18a4d6214218c9205089e7 100644 (file)
@@ -3065,11 +3065,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else {
+               else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                        /* in sync if before recovery_offset */
-                       if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-                               set_bit(R5_Insync, &dev->flags);
-               }
+                       set_bit(R5_Insync, &dev->flags);
+               else if (test_bit(R5_UPTODATE, &dev->flags) &&
+                        test_bit(R5_Expanded, &dev->flags))
+                       /* If we've reshaped into here, we assume it is Insync.
+                        * We will shortly update recovery_offset to make
+                        * it official.
+                        */
+                       set_bit(R5_Insync, &dev->flags);
+
                if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
index b0b0fa5a3572fb834a87c45dba668b798524dfad..54a4a3f22e2e4187c5651aaa42cf73dcf9141059 100644 (file)
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
 {
        struct isp_pipeline *pipe =
                to_isp_pipeline(&ccdc->video_out.video.entity);
-       struct video_device *vdev = &ccdc->subdev.devnode;
+       struct video_device *vdev = ccdc->subdev.devnode;
        struct v4l2_event event;
 
        memset(&event, 0, sizeof(event));
index 68d539456c552aa0c5ee16bd999ed09f41dee712..bc0b2c7349b97894d62c6fcdb876eff8613634f7 100644 (file)
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 
 static void isp_stat_queue_event(struct ispstat *stat, int err)
 {
-       struct video_device *vdev = &stat->subdev.devnode;
+       struct video_device *vdev = stat->subdev.devnode;
        struct v4l2_event event;
        struct omap3isp_stat_event_status *status = (void *)event.u.data;
 
index 43c0ebb81956188b22f7ce2953eddc936f39ae3a..b7b2d3483fd4e1e1f8bb5b6a2feb6e7c349c57f3 100644 (file)
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
index 1e9173804ede2bacb93a7179d5d7c817c79dfcf5..d3d572b2317b888be174bf52d3d17c65b391ef07 100644 (file)
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
        {
                .name   = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
                .flags  = IORESOURCE_IRQ,
        },
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
        {
index f1d88483112cac13565cce2a8f88d3092c9d516c..8d816cce8322ebecdf8d40e29f0c0b0a3aedcbdb 100644 (file)
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
 
        ret = __adp5520_read(chip->client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = __adp5520_write(chip->client, reg, reg_val);
        }
index 1b79c37fd59901b882fc0b995e6182fe43a4eedb..1924b857a0fbf6355d9acfcd96fa387b841c6976 100644 (file)
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __da903x_write(chip->client, reg, reg_val);
        }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
        struct da903x_chip *chip = i2c_get_clientdata(client);
 
        da903x_remove_subdevs(chip);
+       free_irq(client->irq, chip);
        kfree(chip);
        return 0;
 }
index 1e9ee533eacb8d204e860f9881d58e2675adf126..ef39528088f2298a47e7f62f69317b2a6af428ee 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
index bba26d96c24075a3cbca9a320fabe3177b4de1a8..a5ddf31b60ca89d3f228997f7da526b126449d94 100644 (file)
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
        }
index 6f5b8cf2f652b8edf6accf9db107d8f63ea72c23..c1da84bc1573f563c4b698f0bc786068e7f911bc 100644 (file)
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
                goto out;
        }
 
-       data &= mask;
+       data &= ~mask;
        err = tps65910_i2c_write(tps65910, reg, 1, &data);
        if (err)
                dev_err(tps65910->dev, "write to reg %x failed\n", reg);
index bfbd66021afd383703fad96e890975853c2bf063..61e70cfaa774fb977adcba8cc697a06edd673210 100644 (file)
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /*
         * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /* [MSG1] fill the register address data */
        msg = &twl->xfer_msg[0];
index f062c8cc6c38f3e40337444b91111646afefde88..29f11e0765feef54093b839b6e288ae56bc0bf3f 100644 (file)
@@ -432,6 +432,7 @@ struct sih_agent {
        u32                     edge_change;
 
        struct mutex            irq_lock;
+       char                    *irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
        struct sih_agent *agent = irq_get_handler_data(irq);
        const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                pr_err("twl4030: %s SIH, read ISR error %d\n",
                        sih->name, isr);
                /* REVISIT:  recover; eventually mask it all, etc */
-               return;
+               return IRQ_HANDLED;
        }
 
        while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                        pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
                                sih->name, irq);
        }
+       return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
                activate_irq(irq);
        }
 
-       status = irq_base;
        twl4030_irq_next += i;
 
        /* replace generic PIH handler (handle_simple_irq) */
        irq = sih_mod + twl4030_irq_base;
        irq_set_handler_data(irq, agent);
-       irq_set_chained_handler(irq, handle_twl4030_sih);
+       agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+       status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+                                     agent->irq_name ?: sih->name, NULL);
 
        pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
                        irq, irq_base, twl4030_irq_next - 1);
 
-       return status;
+       return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        }
 
        /* install an irq handler to demultiplex the TWL4030 interrupt */
-       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-                                       "TWL4030-PIH", NULL);
+       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+                                     IRQF_ONESHOT,
+                                     "TWL4030-PIH", NULL);
        if (status < 0) {
                pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
index 5d6ba132837e8efb5f470d48784d5bb5f9809509..61894fced8ea281570bf80320fd593efa0d25740 100644 (file)
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
 
        switch (wm8994->type) {
        case WM8958:
+       case WM1811:
                ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
                if (ret < 0) {
                        dev_err(dev, "Failed to read power status: %d\n", ret);
index 67bf0781999200d656e09e990be95709700cbce1..c8f47f17186f1449a10dd8eb14fb2e1f81a3c978 100644 (file)
@@ -477,7 +477,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if (rtl_tbi_enabled(tp)) {
index dca9d3369cdd9de8eb5f1156778369d76006c246..c97d2f59085504274eddcc2d5a25ecf352c824f0 100644 (file)
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
        /* handle completed packets */
+       spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+       spin_lock_irqsave(&chan->lock, flags);
 
        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
index e6fed4d4cb77f70c8c6d62af16d53a57ed31c99c..e95f0e60a9bc7bc9dd18587986c53e5af8f79d42 100644 (file)
@@ -1655,6 +1655,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // Asus USB Ethernet Adapter
+       USB_DEVICE (0x0b95, 0x7e2b),
+       .driver_info = (unsigned long) &ax88772_info,
 },
        { },            // END
 };
index 888abc2be3a547d6f85ad32723fd2d2910c5582e..528d5f3e868c712a7b2372dc8616e1581205bddc 100644 (file)
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
 
        ath_rc_priv->max_valid_rate = k;
        ath_rc_sort_validrates(rate_table, ath_rc_priv);
-       ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+       ath_rc_priv->rate_max_phy = (k > 4) ?
+                                       ath_rc_priv->valid_rate_index[k-4] :
+                                       ath_rc_priv->valid_rate_index[k-1];
        ath_rc_priv->rate_table = rate_table;
 
        ath_dbg(common, ATH_DBG_CONFIG,
index a7a6def40d05aa29fe6d739daf1f0d247bf45a80..5c7c17c7166ac7a307407ddff67eee59ca148771 100644 (file)
@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                        if (ctx->ht.enabled) {
                                /* if HT40 is used, it should not change
                                 * after associated except channel switch */
-                               if (iwl_is_associated_ctx(ctx) &&
-                                    !ctx->ht.is_40mhz)
+                               if (!ctx->ht.is_40mhz ||
+                                               !iwl_is_associated_ctx(ctx))
                                        iwlagn_config_ht40(conf, ctx);
                        } else
                                ctx->ht.is_40mhz = false;
index 35a6b71f358ce7a563f6507b58f0743eb6ded16a..df1540ca6102f641ed491cd50899e5eec17b8963 100644 (file)
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               else
+                       tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
        iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
index bacc06c95e7ac798449e11ca883e5b5dc1def717..e0e9a3dfbc00a2febd8ce146bc8c3d02bc8e312a 100644 (file)
@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
        int ret;
        u8 sta_id;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return 0;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *ctx = vif_priv->ctx;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
index ce918980e97799a51fc3b0b9a732d28fe706c84a..5f17ab8e76bacbc7e4521951c15a716a532afc45 100644 (file)
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
-       if (is_agg)
-               iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                                              le16_to_cpu(tx_cmd->len));
+       iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
        dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
index ac278156d390ea901d37eda9819abdf2de26edcf..6e0a3eaecf7070bcdcce90eb91e63d7fe6cc6aab 100644 (file)
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
-       unsigned long cmd_pending_q_flags;
        unsigned long scan_pending_q_flags;
        uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                cmd_node = adapter->curr_cmd;
                cmd_node->wait_q_enabled = false;
                cmd_node->cmd_flag |= CMD_F_CANCELED;
-               spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-                                 cmd_pending_q_flags);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-                                      cmd_pending_q_flags);
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
 
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
        adapter->cmd_wait_q.status = -1;
-       mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
index 717ebc9ff941808e2ddd52c93db09cc998456565..600d82348511068b141dc65f7a6e7e7c79037a14 100644 (file)
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
                ret = -ENODEV;
                goto err0;
        }
-       dwc->revision = reg & DWC3_GSNPSREV_MASK;
+       dwc->revision = reg;
 
        dwc3_core_soft_reset(dwc);
 
index 596a0b464e61f069f93c8278a3c8193ab44a7873..4dff83d2f265235338f3b92f0e5b59690a96de25 100644 (file)
@@ -130,9 +130,6 @@ ep_matches (
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
-                       /* Update the ep_comp descriptor if needed */
-                       if (num_req_streams != ep->max_streams)
-                               ep_comp->bmAttributes = ep->max_streams;
                }
 
        }
index a7dc1e1d45f2a77e1cfce4de339b449b8863f778..2ac4ac2e4ef95208be6d1ae2b42af1840cb42bb9 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
        struct usb_hcd *hcd;
        int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
        ret = platform_driver_register(&isp1760_plat_driver);
        if (!ret)
                any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        ret = platform_driver_register(&isp1760_of_driver);
        if (!ret)
                any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
 static void __exit isp1760_exit(void)
 {
        platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
index 60ddba8066ea201181b0b6ce3c69a847eafc618a..79cb0af779fa07dac0702ee9b2b2d43e0a22b738 100644 (file)
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
+                       else if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP, packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
index e24cd8986d8badacc41ce764a15cc99f2524fb8c..ea78c3a17eecd8d4bf38e182fcb4ca1ce0389492 100644 (file)
@@ -12,7 +12,7 @@ here.
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
index 704a2ba08ea86fa4232f95549eefad9a8d737d7c..0cc20b35c1c4d99bd424dc720e5764c9f80ac7ba 100644 (file)
@@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
        struct list_head *fallback;
        int ret;
 
-again:
        spin_lock_irqsave(&workers->lock, flags);
+again:
        worker = next_worker(workers);
 
        if (!worker) {
@@ -579,6 +579,7 @@ again:
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        ret = __btrfs_start_workers(workers);
+                       spin_lock_irqsave(&workers->lock, flags);
                        if (ret)
                                goto fallback;
                        goto again;
index 0a6b928813a41f4ca3176cf2e0a228d166492bab..fd1a06df5bc637c5dad0b0b7e5ce1ce88d5c1616 100644 (file)
@@ -4590,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
        int err = btrfs_add_link(trans, dir, inode,
                                 dentry->d_name.name, dentry->d_name.len,
                                 backref, index);
-       if (!err) {
-               d_instantiate(dentry, inode);
-               return 0;
-       }
        if (err > 0)
                err = -EEXIST;
        return err;
@@ -4655,6 +4651,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        else {
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4722,6 +4719,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4779,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *parent = dentry->d_parent;
                err = btrfs_update_inode(trans, root, inode);
                BUG_ON(err);
+               d_instantiate(dentry, inode);
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
@@ -7245,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
 
 out_unlock:
+       if (!err)
+               d_instantiate(dentry, inode);
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
        if (drop_inode) {
index e4c7af393c2db85a75b17a0e0fb6c7c24c026456..30f78bb16afb2053134d2b155717ddda3459ce88 100644 (file)
@@ -47,17 +47,6 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
-const char *wb_reason_name[] = {
-       [WB_REASON_BACKGROUND]          = "background",
-       [WB_REASON_TRY_TO_FREE_PAGES]   = "try_to_free_pages",
-       [WB_REASON_SYNC]                = "sync",
-       [WB_REASON_PERIODIC]            = "periodic",
-       [WB_REASON_LAPTOP_TIMER]        = "laptop_timer",
-       [WB_REASON_FREE_MORE_MEM]       = "free_more_memory",
-       [WB_REASON_FS_FREE_SPACE]       = "fs_free_space",
-       [WB_REASON_FORKER_THREAD]       = "forker_thread"
-};
-
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
index f549056fb20bd5533555918cc1b1f9805c2cdcc3..87f402ccec55567330943ab774ffb12ae21c7da8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)     name##_lock_init()
 
 #define DEFINE_LGLOCK(name)                                            \
                                                                        \
+ DEFINE_SPINLOCK(name##_cpu_lock);                                     \
+ cpumask_t name##_cpus __read_mostly;                                  \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
  DEFINE_LGLOCK_LOCKDEP(name);                                          \
                                                                        \
+ static int                                                            \
+ name##_lg_cpu_callback(struct notifier_block *nb,                     \
+                               unsigned long action, void *hcpu)       \
+ {                                                                     \
+       switch (action & ~CPU_TASKS_FROZEN) {                           \
+       case CPU_UP_PREPARE:                                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_set((unsigned long)hcpu, name##_cpus);              \
+               spin_unlock(&name##_cpu_lock);                          \
+               break;                                                  \
+       case CPU_UP_CANCELED: case CPU_DEAD:                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_clear((unsigned long)hcpu, name##_cpus);            \
+               spin_unlock(&name##_cpu_lock);                          \
+       }                                                               \
+       return NOTIFY_OK;                                               \
+ }                                                                     \
+ static struct notifier_block name##_lg_cpu_notifier = {               \
+       .notifier_call = name##_lg_cpu_callback,                        \
+ };                                                                    \
  void name##_lock_init(void) {                                         \
        int i;                                                          \
        LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
                lock = &per_cpu(name##_lock, i);                        \
                *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
        }                                                               \
+       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
+       get_online_cpus();                                              \
+       for_each_online_cpu(i)                                          \
+               cpu_set(i, name##_cpus);                                \
+       put_online_cpus();                                              \
  }                                                                     \
  EXPORT_SYMBOL(name##_lock_init);                                      \
                                                                        \
                                                                        \
  void name##_global_lock_online(void) {                                        \
        int i;                                                          \
-       preempt_disable();                                              \
+       spin_lock(&name##_cpu_lock);                                    \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock_online(void) {                              \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
        }                                                               \
-       preempt_enable();                                               \
+       spin_unlock(&name##_cpu_lock);                                  \
  }                                                                     \
  EXPORT_SYMBOL(name##_global_unlock_online);                           \
                                                                        \
index 6faec1a6021629d73896fb425b49f7820720c151..75766b42660e2dd385b26a0144c96dc90b49cbaf 100644 (file)
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_NOPEER             0x0040
 
        short                   error;
        short                   obsolete;
index a09447749e2d59a467c51cde514b35cef79c9b1b..57f15a7f1cddcb4a254cd52f46c914cce176caea 100644 (file)
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index e90e7a9935ddc5c70c8e920487bc1196c2cc49e2..a15432da27c3a911ef2669c736e43cdd18b2daa0 100644 (file)
@@ -241,6 +241,9 @@ extern struct sctp_globals {
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
index abb6e0f0c3c3e59b79f23fc72d2e9d8d389e3774..32e39371fba627ebfaab66713e645889ccb7ba47 100644 (file)
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
index b99caa8b780c624af834caeab4130549479e3844..99d1d0decf88e41a7c0c038d463330e8351046b1 100644 (file)
                {I_REFERENCED,          "I_REFERENCED"}         \
        )
 
+#define WB_WORK_REASON                                                 \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
-                 wb_reason_name[__entry->reason]
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
        )
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
-               wb_reason_name[__entry->reason])
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
 );
 
 TRACE_EVENT(global_dirty_state,
index 563f136094704fb8e6211636d72c3cc050da5f96..cf915b86a5fbc2c3fcffba1f125fe3e1afd56855 100644 (file)
@@ -470,7 +470,7 @@ out:
        cpu_maps_update_done();
 }
 
-static int alloc_frozen_cpus(void)
+static int __init alloc_frozen_cpus(void)
 {
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
@@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
 }
 
 
-int cpu_hotplug_pm_sync_init(void)
+static int __init cpu_hotplug_pm_sync_init(void)
 {
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
index 81b4a27261b2199b375082bc66390c2f232829b1..a0a88543934ec44a2218c8a654f378501d754336 100644 (file)
@@ -285,14 +285,14 @@ static int usermodehelper_disabled = 1;
 static atomic_t running_helpers = ATOMIC_INIT(0);
 
 /*
- * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * Wait queue head used by usermodehelper_disable() to wait for all running
  * helpers to finish.
  */
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
  * Time to wait for running_helpers to become zero before the setting of
- * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ * usermodehelper_disabled in usermodehelper_disable() fails
  */
 #define RUNNING_HELPERS_TIMEOUT        (5 * HZ)
 
index c106d3b3cc640d56266c609f5c840e29c6fcae01..5f0a3c91fdac043437392bbe141b45c30cdcf66e 100644 (file)
@@ -1828,7 +1828,7 @@ repeat:
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
index e0af7237cd9245fedfc8886ec446f670ddc48500..c1c597e3e198e9ffba26dd4dcf2e59e2ffc923c3 100644 (file)
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
        if (!hci_conn_auth(conn, sec_level, auth_type))
index 5ea94a1eecf2f9a4338aa116d645b03b77fa7bd6..17b5b1cd96579e352b2f3bff0bb5d9b7d9406ff2 100644 (file)
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
        void *ptr = req->data;
        int type, olen;
        unsigned long val;
-       struct l2cap_conf_rfc rfc;
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 
        BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
                }
        }
 
+       /* Use sane default values in case a misbehaving remote device
+        * did not send an RFC option.
+        */
+       rfc.mode = chan->mode;
+       rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+       rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+       rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+       BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
        switch (rfc.mode) {
        case L2CAP_MODE_ERTM:
index 4e32e18211f9187d8a98a27772a9ba97f5cf7112..2d28dfe983890fc74a2a8ebf5bc5f7e5716058c5 100644 (file)
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        if (list_empty(&s->dlcs)) {
                                s->state = BT_DISCONN;
                                rfcomm_send_disc(s, 0);
+                               rfcomm_session_clear_timer(s);
                        }
 
                        break;
index d6ec3720c77e448c4fd014f78397dfe5fb52e592..fa8b8f763580a0fcf82cf9afe5c6fb91d1f64f8a 100644 (file)
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
        return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+       return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .cow_metrics =          fake_cow_metrics,
        .neigh_lookup =         fake_neigh_lookup,
+       .mtu =                  fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM;
+       rt->dst.flags   = DST_NOXFRM | DST_NOPEER;
        rt->dst.ops = &fake_dst_ops;
 }
 
index 8ae42de9c79e78379691f20207987c5b2ea338b6..e318c7e98042ffcfd1fc43a1d04b5cf816d651a8 100644 (file)
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
        put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+       flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+       schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
index c71c434a4c053e440dc816682d944c521e05c50f..385aefe536489548ed3f53e61094faaac0d08afd 100644 (file)
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
        if (count) {
                int i;
 
-               if (count > 1<<30) {
+               if (count > INT_MAX)
+                       return -EINVAL;
+               count = roundup_pow_of_two(count);
+               if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+                               / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
-               count = roundup_pow_of_two(count);
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
                if (!table)
                        return -ENOMEM;
index 4ed7b1d12f5ecde5b8c2119c0d4cfaaa765ff470..b23f174ab84c3b6c72302834d8e13695d22a1c6c 100644 (file)
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
index 0da2afc97f32ffae2773098391aba0dcbd004903..99ec116bef145e1ac0432918894b447b2abf8da3 100644 (file)
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
index 46af62363b8c1e9ef452b6d750886f21e56f08b1..94cdbc55ca7ead13879d563608f8a191ca3b424c 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly   = HZ / 50;
@@ -133,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly   = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 static int redirect_genid;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *     Interface to generic destination cache.
  */
@@ -830,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1271,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1282,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
-       } else
+       } else if (!rt)
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));
 
@@ -3178,6 +3274,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3388,6 +3491,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
index 84d0bd5cac939814edaed4379f09464a958d61bf..ec562713db9b16e3ee6a3b03ca17a13af2024e77 100644 (file)
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        static atomic_t ipv6_fragmentation_id;
        int old, new;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                struct inet_peer *peer;
 
                if (!rt->rt6i_peer)
index dfd3a648a55107bda2ff14adb6f9e91c06449240..a18e6c3d36e37e699089ed5e0910c857da073d1c 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non stream protcols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ copy_uaddr:
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+                       sk_eat_skb(sk, skb, 0);
+                       *seq = 0;
+       }
+
        goto out;
 }
 
index 5b138506690ec578105911ffb64309b5297d6458..9ddf1c3bfb39c5f67cb8f8c24ccdd22d03c38f10 100644 (file)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        }
 
-       if (sinfo->count.to)
+       if (sinfo->count.to >= sinfo->count.from)
                return what <= sinfo->count.to && what >= sinfo->count.from;
-       else
-               return what >= sinfo->count.from;
+       else /* inverted */
+               return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
index 3925c6578767ea61be8cc66933d38a6c500cd1d2..ea66034499ce6bf54c3be5f02ec66ff2140f69a8 100644 (file)
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
        __u32 timeout)
 {
        int rc = 0;
-       unsigned long completion_rc;
+       long completion_rc;
 
        ndev->req_status = NCI_REQ_PEND;
 
index 82a6f34d39d012fb35d9a0d490503fcc2048e6e2..3891702b81df8fef757677dc779f4bddd2e31789 100644 (file)
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
index f88256cbacbfe4b89c202d591514fb28feaac11b..28de43092330abc125423d5328babc709b1f986f 100644 (file)
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
index 152b5b3c3fffa978ae0d725d182f9aa54c8a9cf6..acd2edbc073ebf4ad334a4b0a16ff7b45413fac5 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
index 08b3cead6503c62f91dc8e97d9b817de7a79ffb9..817174eb5f41a50147dddded99bf222e421001b6 100644 (file)
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index 14c2b06028ffb1bea3acde6243ff9386d053616e..cfeb1d4a1ee6ca730595959946ced911b7442baa 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 61b9fca5a173bba9057f9a09dc2ac6cf45f34bc9..6f6ad8686833920fee313ad2dbd4b0cb4a23cade 100644 (file)
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index 13bf5fcdbff1b9f80d2d0c6288ab98762d499e74..54a7cd2fdd7af5c96d9a0354bfb6bfd56b27f09d 100644 (file)
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
index 6b3952961b858369d8a63b908b0911357a9e28e7..60ffbd067ff75643ac3f5cc61e4ba20c2b8ef3b9 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
index 2118d6446630e3ef64ae3e65b0e7066daf5a85c2..9049a5caeb257d783db1008224eabea7cefa7cef 100644 (file)
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
        struct dst_entry *head, *next;
 
-       flow_cache_flush();
-
        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+       flow_cache_flush();
+       __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+       flow_cache_flush_deferred();
+       __xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
        do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = __xfrm_garbage_collect;
+                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_DOWN:
-               __xfrm_garbage_collect(dev_net(dev));
+               xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
 }
index ba573fe7c74d5bfe0495372931ebff69406f35c2..914833d99b06f78242fa12584c568ecef6a5e65a 100644 (file)
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            --directory=$(srctree) --directory=$(objtree)           \
            --output $(obj)/config.pot
        $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
-       $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
-       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`;    \
+       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
+           $(srctree)/arch/*/um/Kconfig`;               \
            do                                           \
                echo "  GEN $$i";                        \
                $(obj)/kxgettext $$i                     \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            done )
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
            --output $(obj)/linux.pot
-       $(Q)rm -f $(srctree)/arch/um/Kconfig
        $(Q)rm -f $(obj)/config.pot
 
 PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
index 6e5addeb236b49595ea563e7525d5a262a32e295..73516f69ac7ca8a33244cb300df8958ac2d77e20 100644 (file)
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
                /* AC97 v2.2 specifications says minimum 1 us. */
                udelay(2);
                gpio_set_value(chip->reset_pin, 1);
+       } else {
+               ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+               udelay(2);
+               ac97c_writel(chip, MR, AC97C_MR_ENA);
        }
 }
 
This page took 0.107222 seconds and 5 git commands to generate.