Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
author     Linus Torvalds <torvalds@woody.linux-foundation.org>
           Sat, 1 Sep 2007 03:03:15 +0000 (20:03 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Sat, 1 Sep 2007 03:03:15 +0000 (20:03 -0700)
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
  - cxgb3 engine microcode load
  cxgb3 - Fix dev->priv usage
  qeth: Drop ARP packets on HiperSockets interface with NOARP attribute.
  qeth: provide specific message for OSA-adapters exclusively used
  qeth: crash during reboot after failing online setting
  qeth: Announce tx checksumming for qeth devices in TSO/EDDP mode
  qeth: don't return the return values of void functions.
  qeth: enforce a rate limit for inbound scatter gather messages
  qeth: ungrouping a device must not be interruptible
  netxen: fix crashes during module unload
  netxen: Avoid firmware load in PCI probe
  PS3: fix the bug that 'ifconfig down' would hang
  IOC3: Program UART predividers.

108 files changed:
Documentation/i2c/busses/i2c-piix4
MAINTAINERS
arch/i386/mm/hugetlbpage.c
arch/ia64/mm/hugetlbpage.c
arch/mips/tx4938/toshiba_rbtx4938/setup.c
arch/sparc64/kernel/irq.c
arch/sparc64/kernel/pci.c
arch/sparc64/kernel/pci_sun4v.c
arch/sparc64/mm/hugetlbpage.c
arch/um/kernel/dyn.lds.S
arch/um/kernel/uml.lds.S
arch/um/os-Linux/sys-x86_64/registers.c
arch/um/sys-x86_64/ptrace.c
arch/x86_64/defconfig
drivers/ata/ata_generic.c
drivers/ata/ata_piix.c
drivers/ata/libata-core.c
drivers/ata/pata_ali.c
drivers/ata/pata_amd.c
drivers/ata/pata_atiixp.c
drivers/ata/pata_cs5520.c
drivers/ata/pata_cs5530.c
drivers/ata/pata_isapnp.c
drivers/ata/pata_it821x.c
drivers/ata/pata_marvell.c
drivers/ata/pata_mpc52xx.c
drivers/ata/pata_pcmcia.c
drivers/ata/pata_pdc2027x.c
drivers/ata/pata_platform.c
drivers/ata/pata_sc1200.c
drivers/ata/pata_scc.c
drivers/ata/pata_serverworks.c
drivers/ata/pata_sil680.c
drivers/ata/pata_sl82c105.c
drivers/ata/pdc_adma.c
drivers/ata/sata_inic162x.c
drivers/ata/sata_mv.c
drivers/ata/sata_nv.c
drivers/ata/sata_promise.c
drivers/ata/sata_qstor.c
drivers/ata/sata_sil.c
drivers/ata/sata_sil24.c
drivers/ata/sata_sis.c
drivers/ata/sata_svw.c
drivers/ata/sata_sx4.c
drivers/ata/sata_uli.c
drivers/ata/sata_via.c
drivers/ata/sata_vsc.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-piix4.c
drivers/net/bnx2.c
drivers/serial/8250_pci.c
drivers/spi/atmel_spi.c
drivers/spi/spi_bfin5xx.c
drivers/spi/spi_imx.c
drivers/spi/spi_mpc83xx.c
drivers/spi/spi_s3c24xx.c
drivers/spi/spi_s3c24xx_gpio.c
drivers/spi/spi_txx9.c
drivers/spi/xilinx_spi.c
drivers/video/console/Kconfig
fs/ecryptfs/inode.c
fs/ecryptfs/mmap.c
fs/hugetlbfs/inode.c
fs/udf/balloc.c
fs/udf/super.c
include/asm-sparc64/device.h
include/asm-sparc64/irq.h
include/asm-um/common.lds.S
include/asm-um/elf-x86_64.h
include/linux/ata.h
include/linux/cpu.h
include/linux/hugetlb.h
include/linux/libata.h
include/linux/pci_ids.h
include/linux/rtnetlink.h
include/linux/sched.h
include/linux/slub_def.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/sctp/ulpqueue.h
kernel/cpu.c
kernel/exit.c
kernel/irq/manage.c
kernel/power/Kconfig
kernel/sched.c
kernel/sched_fair.c
kernel/signal.c
kernel/sys.c
kernel/user_namespace.c
mm/mempolicy.c
mm/migrate.c
mm/page_alloc.c
mm/slub.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_input.c
net/core/pktgen.c
net/ipv4/tcp_input.c
net/netfilter/xt_tcpudp.c
net/sched/sch_prio.c
net/sctp/associola.c
net/sctp/outqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/ulpqueue.c

index fa0c786a8bf5a787e6a980dd9dbeef9c283d7b41..cf6b6cb02aa15e8975fb191486e56493f3a06aa3 100644 (file)
@@ -6,7 +6,7 @@ Supported adapters:
     Datasheet: Publicly available at the Intel website
   * ServerWorks OSB4, CSB5, CSB6 and HT-1000 southbridges
     Datasheet: Only available via NDA from ServerWorks
-  * ATI IXP200, IXP300, IXP400, SB600 and SB700 southbridges
+  * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
     Datasheet: Not publicly available
   * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
     Datasheet: Publicly available at the SMSC website http://www.smsc.com
index 48ca8b47150d37c088d9d31bd3582aae94054776..9c54a5ef0ba7560232ee24fff40c3728fb277ecb 100644 (file)
@@ -44,9 +44,10 @@ trivial patch so apply some common sense.
        or does something very odd once a month document it.
 
        PLEASE remember that submissions must be made under the terms
-       of the OSDL certificate of contribution
-       (http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html)
-       and should include a Signed-off-by: line.
+       of the OSDL certificate of contribution and should include a
+       Signed-off-by: line.  The current version of this "Developer's
+       Certificate of Origin" (DCO) is listed in the file
+       Documentation/SubmittingPatches.
 
 6.     Make sure you have the right to send any changes you make. If you
        do changes at work you may find your employer owns the patch
@@ -2057,12 +2058,18 @@ L:      http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
 W:     http://ipw2200.sourceforge.net
 S:     Supported
 
-IOC3 DRIVER
+IOC3 ETHERNET DRIVER
 P:     Ralf Baechle
 M:     ralf@linux-mips.org
 L:     linux-mips@linux-mips.org
 S:     Maintained
 
+IOC3 SERIAL DRIVER
+P:     Pat Gefre
+M:     pfg@sgi.com
+L:     linux-kernel@linux-mips.org
+S:     Maintained
+
 IP MASQUERADING:
 P:     Juanjo Ciarlante
 M:     jjciarla@raiz.uncu.edu.ar
@@ -2594,6 +2601,19 @@ M:       shemminger@linux-foundation.org
 L:     netem@lists.linux-foundation.org
 S:     Maintained
 
+NETERION (S2IO) Xframe 10GbE DRIVER
+P:     Ramkrishna Vepa
+M:     ram.vepa@neterion.com
+P:     Rastapur Santosh
+M:     santosh.rastapur@neterion.com
+P:     Sivakumar Subramani
+M:     sivakumar.subramani@neterion.com
+P:     Sreenivasa Honnur
+M:     sreenivasa.honnur@neterion.com
+L:     netdev@vger.kernel.org
+W:     http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
+S:     Supported
+
 NETFILTER/IPTABLES/IPCHAINS
 P:     Rusty Russell
 P:     Marc Boucher
@@ -2734,19 +2754,6 @@ M:       adaplas@gmail.com
 L:     linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
 S:     Maintained
 
-NETERION (S2IO) Xframe 10GbE DRIVER
-P:     Ramkrishna Vepa
-M:     ram.vepa@neterion.com
-P:     Rastapur Santosh
-M:     santosh.rastapur@neterion.com
-P:     Sivakumar Subramani
-M:     sivakumar.subramani@neterion.com
-P:     Sreenivasa Honnur
-M:     sreenivasa.honnur@neterion.com
-L:     netdev@vger.kernel.org
-W:     http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
-S:     Supported
-
 OPENCORES I2C BUS DRIVER
 P:     Peter Korsgaard
 M:     jacmet@sunsite.dk
index efdf95ac8031daf0a8c5d865575e1960890bd156..6c06d9c0488ec28c05d45c226d77c1050a7394a8 100644 (file)
@@ -367,7 +367,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
index d22861c5b04ca596e4beebbf295f66955bef1606..a9ff685aea25acfbd6547d62d3845fb8016c96a6 100644 (file)
@@ -75,10 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
@@ -151,7 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 
        /* Handle MAP_FIXED */
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
index 84ebff711e6ee294f84cbde0fb091c47a7fe3be1..f236b1ff89235bbe1e6adea98d54b133c7feffb0 100644 (file)
@@ -1108,7 +1108,7 @@ static void __init txx9_spi_init(unsigned long base, int irq)
                        .flags  = IORESOURCE_IRQ,
                },
        };
-       platform_device_register_simple("txx9spi", 0,
+       platform_device_register_simple("spi_txx9", 0,
                                        res, ARRAY_SIZE(res));
 }
 
index 384abf410cf03c0e5f57f823dffd27c7859dc9fa..23956096b3bf2f4044628eea93332251afe12adf 100644 (file)
@@ -217,8 +217,27 @@ struct irq_handler_data {
        void            (*pre_handler)(unsigned int, void *, void *);
        void            *pre_handler_arg1;
        void            *pre_handler_arg2;
+
+       u32             msi;
 };
 
+void sparc64_set_msi(unsigned int virt_irq, u32 msi)
+{
+       struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+       if (data)
+               data->msi = msi;
+}
+
+u32 sparc64_get_msi(unsigned int virt_irq)
+{
+       struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+       if (data)
+               return data->msi;
+       return 0xffffffff;
+}
+
 static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
 {
        unsigned int real_irq = virt_to_real_irq(virt_irq);
@@ -308,7 +327,7 @@ static void sun4u_irq_disable(unsigned int virt_irq)
 
        if (likely(data)) {
                unsigned long imap = data->imap;
-               u32 tmp = upa_readq(imap);
+               unsigned long tmp = upa_readq(imap);
 
                tmp &= ~IMAP_VALID;
                upa_writeq(tmp, imap);
@@ -741,7 +760,7 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
                        break;
        }
        if (devino >= msi_end)
-               return 0;
+               return -ENOSPC;
 
        sysino = sun4v_devino_to_sysino(devhandle, devino);
        bucket = &ivector_table[sysino];
@@ -755,8 +774,8 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
 
        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
-               prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
-               prom_halt();
+               virt_irq_free(*virt_irq_p);
+               return -ENOMEM;
        }
        set_irq_chip_data(bucket->virt_irq, data);
 
index 3d93e9203ba2d412ccb6b0e71b5c1432ca9a06d3..139b4cff80191149f89400c4cfb402751cf87c02 100644 (file)
@@ -393,7 +393,6 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
        sd->host_controller = pbm;
        sd->prom_node = node;
        sd->op = of_find_device_by_node(node);
-       sd->msi_num = 0xffffffff;
 
        sd = &sd->op->dev.archdata;
        sd->iommu = pbm->iommu;
index 466f4aa8fc82236549567ba47007fa193cf24857..da724b13e89e45eaf567bdfb380268bbd7fcecf9 100644 (file)
@@ -940,13 +940,13 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
        if (msi_num < 0)
                return msi_num;
 
-       devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
-                                pbm->msiq_first_devino,
-                                (pbm->msiq_first_devino +
-                                 pbm->msiq_num));
-       err = -ENOMEM;
-       if (!devino)
+       err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
+                             pbm->msiq_first_devino,
+                             (pbm->msiq_first_devino +
+                              pbm->msiq_num));
+       if (err < 0)
                goto out_err;
+       devino = err;
 
        msiqid = ((devino - pbm->msiq_first_devino) +
                  pbm->msiq_first);
@@ -971,7 +971,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
                goto out_err;
 
-       pdev->dev.archdata.msi_num = msi_num;
+       sparc64_set_msi(*virt_irq_p, msi_num);
 
        if (entry->msi_attrib.is_64) {
                msg.address_hi = pbm->msi64_start >> 32;
@@ -993,8 +993,6 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
 
 out_err:
        free_msi(pbm, msi_num);
-       sun4v_destroy_msi(*virt_irq_p);
-       *virt_irq_p = 0;
        return err;
 
 }
@@ -1006,7 +1004,7 @@ static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
        unsigned long msiqid, err;
        unsigned int msi_num;
 
-       msi_num = pdev->dev.archdata.msi_num;
+       msi_num = sparc64_get_msi(virt_irq);
        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
        if (err) {
                printk(KERN_ERR "%s: getmsiq gives error %lu\n",
index eaba9b70b184ef67d47fbf76bdc9d92ea4314754..6cfab2e4d340945dd63445f71d3caa361b6701a9 100644 (file)
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
index 24547741b205b4f7ccedf081725bd3f1c490cff3..41850906116ea4a1579c2001709f61e921a00a4e 100644 (file)
@@ -71,11 +71,13 @@ SECTIONS
     *(.gnu.warning)
 
     . = ALIGN(4096);
-    __syscall_stub_start = .;
-    *(.__syscall_stub*)
-    __syscall_stub_end = .;
-    . = ALIGN(4096);
   } =0x90909090
+  . = ALIGN(4096);
+  .syscall_stub : {
+       __syscall_stub_start = .;
+       *(.__syscall_stub*)
+       __syscall_stub_end = .;
+  }
   .fini           : {
     KEEP (*(.fini))
   } =0x90909090
@@ -138,8 +140,8 @@ SECTIONS
   .got            : { *(.got.plt) *(.got) }
   _edata = .;
   PROVIDE (edata = .);
-  __bss_start = .;
   .bss            : {
+   __bss_start = .;
    *(.dynbss)
    *(.bss .bss.* .gnu.linkonce.b.*)
    *(COMMON)
index 307b9373676b2f4d0ae49374df6b47292fb4c53c..81acdc24348ee7fbf3ca2a53032abd69315647e5 100644 (file)
@@ -44,12 +44,13 @@ SECTIONS
     /* .gnu.warning sections are handled specially by elf32.em.  */
     *(.gnu.warning)
     *(.gnu.linkonce.t*)
+  }
 
-    . = ALIGN(4096);
-    __syscall_stub_start = .;
-    *(.__syscall_stub*)
-    __syscall_stub_end = .;
-    . = ALIGN(4096);
+  . = ALIGN(4096);
+  .syscall_stub : {
+       __syscall_stub_start = .;
+       *(.__syscall_stub*)
+       __syscall_stub_end = .;
   }
 
   #include "asm/common.lds.S"
index e6fc2179d1bc4ebb9944f934604b29ac6836b33f..9467315b8059cbe4e8d733d9451e66a9c9524afd 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <errno.h>
+#include <sys/ptrace.h>
 #include <string.h>
 #include "ptrace_user.h"
 #include "uml-config.h"
 static unsigned long exec_regs[MAX_REG_NR];
 static unsigned long exec_fp_regs[HOST_FP_SIZE];
 
+int save_fp_registers(int pid, unsigned long *fp_regs)
+{
+       if(ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
+               return -errno;
+       return 0;
+}
+
+int restore_fp_registers(int pid, unsigned long *fp_regs)
+{
+       if(ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
+               return -errno;
+       return 0;
+}
+
 void init_thread_registers(union uml_pt_regs *to)
 {
        memcpy(to->skas.regs, exec_regs, sizeof(to->skas.regs));
index 55b66e09a98cce2437969f49fdc477e08bb92311..1970d78aa5289ecaf8e3cb52e2e87c188f353b2f 100644 (file)
@@ -156,12 +156,6 @@ int is_syscall(unsigned long addr)
        return(instr == 0x050f);
 }
 
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu )
-{
-       panic("dump_fpu");
-       return(1);
-}
-
 int get_fpregs(unsigned long buf, struct task_struct *child)
 {
        panic("get_fpregs");
index e64f65c9d901903e67d0a487a319a76e98176c6f..b091c5e35558c1daedf34688e296c08166f85f60 100644 (file)
@@ -201,7 +201,6 @@ CONFIG_PM=y
 # CONFIG_PM_DEBUG is not set
 CONFIG_HIBERNATION=y
 CONFIG_PM_STD_PARTITION=""
-CONFIG_SUSPEND_SMP=y
 
 #
 # ACPI (Advanced Configuration and Power Interface) Support
index 430fcf4f9ef3be67d98e6896607f1554b9e1926c..9454669547244e5960e05c5bd9741d6a3efc90fb 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.12"
+#define DRV_VERSION "0.2.13"
 
 /*
  *     A generic parallel ATA driver using libata
index 071d274afaabcf6a971831019c44e50900d306c9..e40c94f5f59d7a0fa3c1f1633b5fdfd1cdfafe1d 100644 (file)
@@ -94,7 +94,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME       "ata_piix"
-#define DRV_VERSION    "2.11"
+#define DRV_VERSION    "2.12"
 
 enum {
        PIIX_IOCFG              = 0x54, /* IDE I/O configuration register */
@@ -130,6 +130,7 @@ enum {
        ich6m_sata_ahci         = 8,
        ich8_sata_ahci          = 9,
        piix_pata_mwdma         = 10,   /* PIIX3 MWDMA only */
+       tolapai_sata_ahci       = 11,
 
        /* constants for mapping table */
        P0                      = 0,  /* port 0 */
@@ -253,6 +254,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
        /* SATA Controller IDE (ICH9M) */
        { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+       /* SATA Controller IDE (Tolapai) */
+       { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
 
        { }     /* terminate list */
 };
@@ -441,12 +444,25 @@ static const struct piix_map_db ich8_map_db = {
        },
 };
 
+static const struct piix_map_db tolapai_map_db = {
+        .mask = 0x3,
+        .port_enable = 0x3,
+        .map = {
+                /* PM   PS   SM   SS       MAP */
+                {  P0,  NA,  P1,  NA }, /* 00b */
+                {  RV,  RV,  RV,  RV }, /* 01b */
+                {  RV,  RV,  RV,  RV }, /* 10b */
+                {  RV,  RV,  RV,  RV },
+        },
+};
+
 static const struct piix_map_db *piix_map_db_table[] = {
        [ich5_sata]             = &ich5_map_db,
        [ich6_sata]             = &ich6_map_db,
        [ich6_sata_ahci]        = &ich6_map_db,
        [ich6m_sata_ahci]       = &ich6m_map_db,
        [ich8_sata_ahci]        = &ich8_map_db,
+       [tolapai_sata_ahci]     = &tolapai_map_db,
 };
 
 static struct ata_port_info piix_port_info[] = {
@@ -560,6 +576,17 @@ static struct ata_port_info piix_port_info[] = {
                .mwdma_mask     = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
                .port_ops       = &piix_pata_ops,
        },
+
+       /* tolapai_sata_ahci: 11: */
+       {
+               .sht            = &piix_sht,
+               .flags          = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+                                 PIIX_FLAG_AHCI,
+               .pio_mask       = 0x1f, /* pio0-4 */
+               .mwdma_mask     = 0x07, /* mwdma0-2 */
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &piix_sata_ops,
+       },
 };
 
 static struct pci_bits piix_enable_bits[] = {
@@ -907,6 +934,13 @@ static int piix_broken_suspend(void)
                                DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
                        },
                },
+               {
+                       .ident = "Satellite U200",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+                       },
+               },
                {
                        .ident = "Satellite U205",
                        .matches = {
@@ -1139,6 +1173,39 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
        hpriv->map = map;
 }
 
+static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+{
+       static struct dmi_system_id sysids[] = {
+               {
+                       /* Clevo M570U sets IOCFG bit 18 if the cdrom
+                        * isn't used to boot the system which
+                        * disables the channel.
+                        */
+                       .ident = "M570U",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
+                       },
+               },
+       };
+       u32 iocfg;
+
+       if (!dmi_check_system(sysids))
+               return;
+
+       /* The datasheet says that bit 18 is NOOP but certain systems
+        * seem to use it to disable a channel.  Clear the bit on the
+        * affected systems.
+        */
+       pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg);
+       if (iocfg & (1 << 18)) {
+               dev_printk(KERN_INFO, &pdev->dev,
+                          "applying IOCFG bit18 quirk\n");
+               iocfg &= ~(1 << 18);
+               pci_write_config_dword(pdev, PIIX_IOCFG, iocfg);
+       }
+}
+
 /**
  *     piix_init_one - Register PIIX ATA PCI device with kernel services
  *     @pdev: PCI device to register
@@ -1200,6 +1267,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
                              piix_map_db_table[ent->driver_data]);
        }
 
+       /* apply IOCFG bit18 quirk */
+       piix_iocfg_bit18_quirk(pdev);
+
        /* On ICH5, some BIOSen disable the interrupt using the
         * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
         * On ICH6, this bit has the same effect, but only when
index 2ad4dda6d4a780e13e1814296ead50ed1dd0774f..a3ee087223deabf12f9491905fb641c97854fabf 100644 (file)
@@ -1911,8 +1911,9 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->flags |= ATA_DFLAG_FLUSH_EXT;
                        }
 
-                       if (ata_id_hpa_enabled(dev->id))
-                               dev->n_sectors = ata_hpa_resize(dev);
+                       if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
+                           ata_id_hpa_enabled(dev->id))
+                               dev->n_sectors = ata_hpa_resize(dev);
 
                        /* config NCQ */
                        ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
@@ -3795,7 +3796,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST9160821AS",        "3.CLF",        ATA_HORKAGE_NONCQ, },
        { "SAMSUNG HD401LJ",    "ZZ100-15",     ATA_HORKAGE_NONCQ, },
 
-       /* Devices with NCQ limits */
+       /* devices which puke on READ_NATIVE_MAX */
+       { "HDS724040KLSA80",    "KFAOA20N",     ATA_HORKAGE_BROKEN_HPA, },
+       { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+       { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+       { "MAXTOR 6L080L4",     "A93.0500",     ATA_HORKAGE_BROKEN_HPA },
 
        /* End Marker */
        { }
@@ -3985,6 +3990,11 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
        tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
 
        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+       /* A clean abort indicates an original or just out of spec drive
+          and we should continue as we issue the setup based on the
+          drive reported working geometry */
+       if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+               err_mask = 0;
 
        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
index e8a28e94fe476a0a8adcfd09131d999de9f7489f..94e5edc12ac9769fd15c3eae3509140ea3b2d80d 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.7.4"
+#define DRV_VERSION "0.7.5"
 
 /*
  *     Cable special cases
index b09facad63e17a0f4dac7e64f854b39592899cb0..04048fcf6305ef7e9423c7e38e9563f56535225b 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.3.9"
 
 /**
  *     timing_setup            -       shared timing computation and load
index 80509be49e7acb0b18cb3c2c94e54aa42e55843e..86f85a2cab7e3a8edd54244f995232e85919cea4 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_atiixp"
-#define DRV_VERSION "0.4.5"
+#define DRV_VERSION "0.4.6"
 
 enum {
        ATIIXP_IDE_PIO_TIMING   = 0x40,
index 7dc76e71bd55070b4fc177eae62b3e0c57f8e2eb..e2459088cdcd2015bb4810a8d80249d062b541d5 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_cs5520"
-#define DRV_VERSION    "0.6.5"
+#define DRV_VERSION    "0.6.6"
 
 struct pio_clocks
 {
index 68f150a1e2f480b42d42ca88895563ba6a91fa16..c6066aa43ec8618cb31b28befa72e74b36706705 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME       "pata_cs5530"
-#define DRV_VERSION    "0.7.3"
+#define DRV_VERSION    "0.7.4"
 
 static void __iomem *cs5530_port_base(struct ata_port *ap)
 {
index 91a396fa5b20f0f75fdbc550ab6d1b9654738145..9e553c54203a32e5f44dfeac976de05d3af6a581 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_isapnp"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.2.2"
 
 static struct scsi_host_template isapnp_sht = {
        .module                 = THIS_MODULE,
index 7225124d96c20613d56969280e7131774a932974..ed637ae33ece31faecb60076df6106166bd32ff9 100644 (file)
@@ -80,7 +80,7 @@
 
 
 #define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.7"
+#define DRV_VERSION "0.3.8"
 
 struct it821x_dev
 {
index 87594c04d3a371d7ce69c24a0b8c018455e2f8c2..ae206f35f747a677e54675847e6fb049c4043569 100644 (file)
@@ -192,6 +192,8 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
 
 static const struct pci_device_id marvell_pci_tbl[] = {
        { PCI_DEVICE(0x11AB, 0x6101), },
+       { PCI_DEVICE(0x11AB, 0x6121), },
+       { PCI_DEVICE(0x11AB, 0x6123), },
        { PCI_DEVICE(0x11AB, 0x6145), },
        { }     /* terminate list */
 };
index 182e83c9047b2bed550b82194c2daabd522437be..099f4cdc4cd9823919c19489504c242993a74d6f 100644 (file)
@@ -24,7 +24,7 @@
 
 
 #define DRV_NAME       "mpc52xx_ata"
-#define DRV_VERSION    "0.1.0ac2"
+#define DRV_VERSION    "0.1.2"
 
 
 /* Private structures used by the driver */
index 6da23feed03922867224c78faa784bb2fcca4980..0f2b027624d6e71273c01a4147f3d6afe2462da1 100644 (file)
@@ -42,7 +42,7 @@
 
 
 #define DRV_NAME "pata_pcmcia"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.2"
 
 /*
  *     Private data structure to glue stuff together
index e3245b36269a2cd64257a228c131b68c683ba1cd..bb64a986e8f5496028fd174452eeb22872285f7c 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_pdc2027x"
-#define DRV_VERSION    "0.9"
+#define DRV_VERSION    "1.0"
 #undef PDC_DEBUG
 
 #ifdef PDC_DEBUG
index a909f793ffc1327dbdbf3b7c9137ab80a928ca04..5086d03f2d7c54883bb5c9ffaf4eba749bb32089 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/pata_platform.h>
 
 #define DRV_NAME "pata_platform"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.1"
 
 static int pio_mask = 1;
 
index b8b2d11e4180c29f13a7f30303c628f49bd23a85..5edf67b1f3bf593f4ffc5e04c9eb46cfe83aac08 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sc1200"
-#define DRV_VERSION    "0.2.5"
+#define DRV_VERSION    "0.2.6"
 
 #define SC1200_REV_A   0x00
 #define SC1200_REV_B1  0x01
index 36cdbd2b0bd50c963df8253925f856328be0579e..2d048ef25a5a11babbef2c2ef9f705adaf14e50d 100644 (file)
@@ -43,7 +43,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME               "pata_scc"
-#define DRV_VERSION            "0.2"
+#define DRV_VERSION            "0.3"
 
 #define PCI_DEVICE_ID_TOSHIBA_SCC_ATA          0x01b4
 
index 89691541fe5936f9e2c51a8c27dc2b6b0dffafb9..0faf99c8f13e4c8f85885c4d4c8d7941e4c6e6ce 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_serverworks"
-#define DRV_VERSION "0.4.1"
+#define DRV_VERSION "0.4.2"
 
 #define SVWKS_CSB5_REVISION_NEW        0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
 #define SVWKS_CSB6_REVISION    0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
index b0cd52d6e3fbc996b5c63e6d02c21c9b5b32aa18..40395804a66fef72b3f280f1d7af87c7d4e6a32f 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sil680"
-#define DRV_VERSION "0.4.6"
+#define DRV_VERSION "0.4.7"
 
 #define SIL680_MMIO_BAR                5
 
index 8c2813aa6cdb39b6c52cdfe0a15e70364991b985..c0f43bb25956d15a7bfdf4188038c952d35cd3d0 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.2"
 
 enum {
        /*
index bec1de594de8b0b4678ac49099bd6ef41a263041..5c79271401afc29ecb216ab46b6c46b7b82ff1a5 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pdc_adma"
-#define DRV_VERSION    "0.06"
+#define DRV_VERSION    "1.0"
 
 /* macro to calculate base address for ATA regs */
 #define ADMA_ATA_REGS(base,port_no)    ((base) + ((port_no) * 0x40))
index a9c948d7604af602632ed3fba4c5772ad456e3ea..fdbed8ecdfc20d66cabcb5deef7eac904e240431 100644 (file)
@@ -28,7 +28,7 @@
 #include <scsi/scsi_device.h>
 
 #define DRV_NAME       "sata_inic162x"
-#define DRV_VERSION    "0.2"
+#define DRV_VERSION    "0.3"
 
 enum {
        MMIO_BAR                = 5,
index 3acf65e75eb2dd0b64b6ce2d2bfc7670c7f1cf8b..11bf6c7ac12260d4ba40551b5eef854022ab8eb3 100644 (file)
@@ -72,7 +72,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_mv"
-#define DRV_VERSION    "0.81"
+#define DRV_VERSION    "1.0"
 
 enum {
        /* BAR's are enumerated in terms of pci_resource_start() terms */
index 0b58c4df6fd2a800c23dfe59390abb8d1055ca2b..40dc73139858832e644118e606e608430f9d5cb5 100644 (file)
@@ -49,7 +49,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME                       "sata_nv"
-#define DRV_VERSION                    "3.4"
+#define DRV_VERSION                    "3.5"
 
 #define NV_ADMA_DMA_BOUNDARY           0xffffffffUL
 
index d39ebc23c4a9e45ec0ac24474af5142f0cfb130e..25698cf0dce01beea9c469701dfe8415190c4b69 100644 (file)
@@ -45,7 +45,7 @@
 #include "sata_promise.h"
 
 #define DRV_NAME       "sata_promise"
-#define DRV_VERSION    "2.09"
+#define DRV_VERSION    "2.10"
 
 enum {
        PDC_MAX_PORTS           = 4,
@@ -328,8 +328,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 
        { PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
        { PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
-       { PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
-       { PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
+       { PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
+       { PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
        { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
        { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
 
index c8f9242e7f44b04670a458291c2d101191e3f9f8..5e1dfdda698f0626baeb6194ff4cfc87038289a7 100644 (file)
@@ -39,7 +39,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_qstor"
-#define DRV_VERSION    "0.08"
+#define DRV_VERSION    "0.09"
 
 enum {
        QS_MMIO_BAR             = 4,
index db67637589520c02b3289421ae79579f4e84d4a8..8c72e714b4564d52ad6fa0bc584e25af1217dd55 100644 (file)
@@ -46,7 +46,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_sil"
-#define DRV_VERSION    "2.2"
+#define DRV_VERSION    "2.3"
 
 enum {
        SIL_MMIO_BAR            = 5,
index 46fbbe7f121c2da1ca115a383800ee9f3525da5c..ef83e6b1e314d1aaf3842ca673012425b92bd66f 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_sil24"
-#define DRV_VERSION    "0.9"
+#define DRV_VERSION    "1.0"
 
 /*
  * Port request block (PRB) 32 bytes
index 31a2f55aae666c137d8b7bbf8bf556e84978ef0e..41c1d6e8f1feb037afc6a278dfd17652dc644380 100644 (file)
@@ -43,7 +43,7 @@
 #include "sis.h"
 
 #define DRV_NAME       "sata_sis"
-#define DRV_VERSION    "0.8"
+#define DRV_VERSION    "1.0"
 
 enum {
        sis_180                 = 0,
index 92e8770750375d35ed129131fc96bd710235ef93..d9678e7bc3a9a3e36cfb71710d9c0e8db723a391 100644 (file)
@@ -53,7 +53,7 @@
 #endif /* CONFIG_PPC_OF */
 
 #define DRV_NAME       "sata_svw"
-#define DRV_VERSION    "2.2"
+#define DRV_VERSION    "2.3"
 
 enum {
        /* ap->flags bits */
index 5193bd8647baa34288d1fbed7fcbd700118766a3..97aefdd87be4d28ee62556c54c2522eb11f51393 100644 (file)
@@ -92,7 +92,7 @@
 #include "sata_promise.h"
 
 #define DRV_NAME       "sata_sx4"
-#define DRV_VERSION    "0.11"
+#define DRV_VERSION    "0.12"
 
 
 enum {
index 78c28512f01c634e2050256a976fe45721f531fb..e6b8b45279afb8f670c74664239360c284fa2a66 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_uli"
-#define DRV_VERSION    "1.2"
+#define DRV_VERSION    "1.3"
 
 enum {
        uli_5289                = 0,
index 86b7bfc173244a203bf11977eba1fbe268db7799..a4e631766eee3525e89591c905ce434f057e4663 100644 (file)
@@ -46,7 +46,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_via"
-#define DRV_VERSION    "2.2"
+#define DRV_VERSION    "2.3"
 
 enum board_ids_enum {
        vt6420,
index 24344d0d0575b10dc394ad501c22fd15f653e04e..1920915dfa2ca41f5856d591b1fd1fd740b971ff 100644 (file)
@@ -47,7 +47,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "sata_vsc"
-#define DRV_VERSION    "2.2"
+#define DRV_VERSION    "2.3"
 
 enum {
        VSC_MMIO_BAR                    = 0,
index 1842f523c23d33b3b1458e992d35effcbf88d0f8..9f3a4cd0b07f7468db3f086022e108bd86516805 100644 (file)
@@ -208,6 +208,7 @@ config I2C_PIIX4
            ATI IXP400
            ATI SB600
            ATI SB700
+           ATI SB800
            Serverworks OSB4
            Serverworks CSB5
            Serverworks CSB6
index debc76cd2161c64b1c675d196ec8d40e3b9c8e50..167e4137ee2176b024e0c89238a880ca069ba2af 100644 (file)
@@ -23,7 +23,7 @@
    Supports:
        Intel PIIX4, 440MX
        Serverworks OSB4, CSB5, CSB6, HT-1000
-       ATI IXP200, IXP300, IXP400, SB600, SB700
+       ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
        SMSC Victory66
 
    Note: we assume there can only be one device, with one SMBus interface.
@@ -397,9 +397,7 @@ static struct pci_device_id piix4_ids[] = {
          .driver_data = 0 },
        { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS),
          .driver_data = 0 },
-       { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SMBUS),
-         .driver_data = 0 },
-       { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SMBUS),
+       { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS),
          .driver_data = 0 },
        { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4),
          .driver_data = 0 },
index 24e7f9ab3f5a72c76eeefd54b332152531c2b98b..854d80c330ec8ab4449bddb23a1af1bf43927f0c 100644 (file)
@@ -3934,11 +3934,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
+               /* Reading back any register after chip reset will hang the
+                * bus on 5706 A0 and A1.  The msleep below provides plenty
+                * of margin for write posting.
+                */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-                   (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
-                       current->state = TASK_UNINTERRUPTIBLE;
-                       schedule_timeout(HZ / 50);
-               }
+                   (CHIP_ID(bp) == CHIP_ID_5706_A1))
+                       msleep(20);
 
                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
index bd66339f7a3f6578a3c008b39a8a7db6c4c0060b..1ea1ed82c352f4c9b7006e9b142184e1afdae3c3 100644 (file)
@@ -610,7 +610,7 @@ static int pci_netmos_init(struct pci_dev *dev)
 /* enable IO_Space bit */
 #define ITE_887x_POSIO_ENABLE          (1 << 31)
 
-static int __devinit pci_ite887x_init(struct pci_dev *dev)
+static int pci_ite887x_init(struct pci_dev *dev)
 {
        /* inta_addr are the configuration addresses of the ITE */
        static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0,
index ad144054da301278b67a205350d96e635ba78a65..b0469749310a449444016e719393fd1aa10005f9 100644 (file)
@@ -251,7 +251,7 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
                xfer->rx_dma = dma_map_single(dev,
                                xfer->rx_buf, xfer->len,
                                DMA_FROM_DEVICE);
-               if (dma_mapping_error(xfer->tx_dma)) {
+               if (dma_mapping_error(xfer->rx_dma)) {
                        if (xfer->tx_buf)
                                dma_unmap_single(dev,
                                                xfer->tx_dma, xfer->len,
index 48587c27050d335fb083d9ea989181a50c7063a3..f540ed77a1021cda053c72d3e8c293606b31e099 100644 (file)
@@ -1303,8 +1303,9 @@ static int bfin5xx_spi_resume(struct platform_device *pdev)
 #define bfin5xx_spi_resume NULL
 #endif                         /* CONFIG_PM */
 
+MODULE_ALIAS("bfin-spi-master");       /* for platform bus hotplug */
 static struct platform_driver bfin5xx_spi_driver = {
-       .driver         = {
+       .driver = {
                .name   = "bfin-spi-master",
                .owner  = THIS_MODULE,
        },
index aee9ad6f633ca14e2619c78e0d36dd82df4ba34f..bd9177f51de9d347062e2493d4d7fd1d963ce0d6 100644 (file)
@@ -1735,7 +1735,7 @@ static int spi_imx_resume(struct platform_device *pdev)
 
 static struct platform_driver driver = {
        .driver = {
-               .name = "imx-spi",
+               .name = "spi_imx",
                .bus = &platform_bus_type,
                .owner = THIS_MODULE,
        },
index 2adf856e44c2bf0d91d6081adfc25b9549559384..fcbf1b8a5264dfec0e4e8f74b6101f5440ea8ea6 100644 (file)
@@ -530,6 +530,7 @@ static int __devexit mpc83xx_spi_remove(struct platform_device *dev)
        return 0;
 }
 
+MODULE_ALIAS("mpc83xx_spi");                   /* for platform bus hotplug */
 static struct platform_driver mpc83xx_spi_driver = {
        .probe = mpc83xx_spi_probe,
        .remove = __devexit_p(mpc83xx_spi_remove),
index 5cf48123e0efbb42612d3c94e59b730b6c00ee79..e9b683f7d7b3bc7f77d0e3e55b611cf09b0f96ea 100644 (file)
@@ -427,6 +427,7 @@ static int s3c24xx_spi_resume(struct platform_device *pdev)
 #define s3c24xx_spi_resume  NULL
 #endif
 
+MODULE_ALIAS("s3c2410_spi");                   /* for platform bus hotplug */
 static struct platform_driver s3c24xx_spidrv = {
        .probe          = s3c24xx_spi_probe,
        .remove         = s3c24xx_spi_remove,
index 611ac22b7cdc2c404e69c07d8dfbcd4e73b1db7c..0fa25e2e80fe556cbde8b1e01843f452129dfaea 100644 (file)
@@ -180,7 +180,7 @@ static struct platform_driver s3c2410_spigpio_drv = {
         .suspend       = s3c2410_spigpio_suspend,
         .resume                = s3c2410_spigpio_resume,
         .driver                = {
-               .name   = "s3c24xx-spi-gpio",
+               .name   = "spi_s3c24xx_gpio",
                .owner  = THIS_MODULE,
         },
 };
index 08e981c40646810dd5de12ae1d03026b636374cb..b7f4bb239eaf79e74dfede155474c43aac272795 100644 (file)
@@ -453,7 +453,7 @@ static int __exit txx9spi_remove(struct platform_device *dev)
 static struct platform_driver txx9spi_driver = {
        .remove = __exit_p(txx9spi_remove),
        .driver = {
-               .name = "txx9spi",
+               .name = "spi_txx9",
                .owner = THIS_MODULE,
        },
 };
index f0bf9a68e96bf03424c65a891e91f4e1322b81b2..5d04f520c12323067e810047afe0aa0e740d1851 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <syslib/virtex_devices.h>
 
-#define XILINX_SPI_NAME "xspi"
+#define XILINX_SPI_NAME "xilinx_spi"
 
 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
  * Product Specification", DS464
index 5db6b1e489b0e1bf0946784d7c900f631a162da1..a22ccf9485a416d7201726fa963f34dd7f1a1d34 100644 (file)
@@ -182,7 +182,7 @@ config FONT_8x8
 
 config FONT_8x16
        bool "VGA 8x16 font" if FONTS
-       depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE=y || STI_CONSOLE || USB_SISUSBVGA_CON 
+       depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
        default y if !SPARC && !FONTS
        help
          This is the "high resolution" font for the VGA frame buffer (the one
index 5d40ad13ab5cb036ba14d5f886b46ee445d29915..131954b3fb98b588db1802301c70ffe2c0196963 100644 (file)
@@ -357,10 +357,6 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
                ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
                goto out;
        }
-       if (special_file(lower_inode->i_mode)) {
-               ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
-               goto out;
-       }
        if (!nd) {
                ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
                                "as we *think* we are about to unlink\n");
index e4ab7bc14efede18f657dc477d1c92137bc37e27..fd3f94d4a66847270aa1a329b78419d6dbeab471 100644 (file)
@@ -834,7 +834,8 @@ static void ecryptfs_sync_page(struct page *page)
                ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
                return;
        }
-       lower_page->mapping->a_ops->sync_page(lower_page);
+       if (lower_page->mapping->a_ops->sync_page)
+               lower_page->mapping->a_ops->sync_page(lower_page);
        ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
                        lower_page->index);
        unlock_page(lower_page);
index c848a191525db4776bec08970538449c442ffb81..950c2fbb815bc35760163020c0133046ff4d1255 100644 (file)
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        int ret;
 
        /*
-        * vma alignment has already been checked by prepare_hugepage_range.
-        * If you add any error returns here, do so after setting VM_HUGETLB,
-        * so is_vm_hugetlb_page tests below unmap_region go the right way
-        * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+        * vma address alignment (but not the pgoff alignment) has
+        * already been checked by prepare_hugepage_range.  If you add
+        * any error returns here, do so after setting VM_HUGETLB, so
+        * is_vm_hugetlb_page tests below unmap_region go the right
+        * way when do_mmap_pgoff unwinds (may be important on powerpc
+        * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
+
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
        mutex_lock(&inode->i_mutex);
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(addr, len, pgoff))
+               if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }
index 276f7207a5642369ba2d56dabad2b683f9444388..87e87dcd3f9c94db447581b9f718ac92ea13c146 100644 (file)
@@ -540,26 +540,24 @@ static void udf_table_free_blocks(struct super_block *sb,
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
-                               sptr = UDF_I_DATA(inode) + epos.offset -
-                                       udf_file_entry_alloc_offset(inode) +
-                                       UDF_I_LENEATTR(inode) - adsize;
+                               sptr = UDF_I_DATA(table) + epos.offset - adsize;
                                dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) + adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
-                               sptr = oepos.bh->b_data + epos.offset;
-                               epos.offset = sizeof(struct allocExtDesc);
-
                                if (oepos.bh) {
+                                       sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)oepos.bh->b_data;
                                        aed->lengthAllocDescs =
                                                cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
                                } else {
+                                       sptr = UDF_I_DATA(table) + epos.offset;
                                        UDF_I_LENALLOC(table) += adsize;
                                        mark_inode_dirty(table);
                                }
+                               epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (UDF_SB_UDFREV(sb) >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
index 382be7be5ae34e66b6619d5c7bab8e20852edd48..c68a6e730b97918f041819f055492193b10869ab 100644 (file)
@@ -89,7 +89,7 @@ static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
 static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
 static void udf_load_fileset(struct super_block *, struct buffer_head *,
                             kernel_lb_addr *);
-static void udf_load_partdesc(struct super_block *, struct buffer_head *);
+static int udf_load_partdesc(struct super_block *, struct buffer_head *);
 static void udf_open_lvid(struct super_block *);
 static void udf_close_lvid(struct super_block *);
 static unsigned int udf_count_free(struct super_block *);
@@ -877,7 +877,7 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
                  root->logicalBlockNum, root->partitionReferenceNum);
 }
 
-static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 {
        struct partitionDesc *p;
        int i;
@@ -912,6 +912,11 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 
                                        UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
                                                udf_iget(sb, loc);
+                                       if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) {
+                                               udf_debug("cannot load unallocSpaceTable (part %d)\n",
+                                                       i);
+                                               return 1;
+                                       }
                                        UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
                                        udf_debug("unallocSpaceTable (part %d) @ %ld\n",
                                                  i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
@@ -938,6 +943,11 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 
                                        UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
                                                udf_iget(sb, loc);
+                                       if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) {
+                                               udf_debug("cannot load freedSpaceTable (part %d)\n",
+                                                       i);
+                                               return 1;
+                                       }
                                        UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
                                        udf_debug("freedSpaceTable (part %d) @ %ld\n",
                                                  i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
@@ -966,6 +976,7 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
                          le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
                          UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
        }
+       return 0;
 }
 
 static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
@@ -1177,12 +1188,19 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo
                                udf_load_logicalvol(sb, bh, fileset);
                        } else if (i == VDS_POS_PARTITION_DESC) {
                                struct buffer_head *bh2 = NULL;
-                               udf_load_partdesc(sb, bh);
+                               if (udf_load_partdesc(sb, bh)) {
+                                       brelse(bh);
+                                       return 1;
+                               }
                                for (j = vds[i].block + 1; j <  vds[VDS_POS_TERMINATING_DESC].block; j++) {
                                        bh2 = udf_read_tagged(sb, j, j, &ident);
                                        gd = (struct generic_desc *)bh2->b_data;
                                        if (ident == TAG_IDENT_PD)
-                                               udf_load_partdesc(sb, bh2);
+                                               if (udf_load_partdesc(sb, bh2)) {
+                                                       brelse(bh);
+                                                       brelse(bh2);
+                                                       return 1;
+                                               }
                                        brelse(bh2);
                                }
                        }
index d5a4559b95557990fd637f1c5cb3f8e89d0b0734..5111e8717be321e395ea8ebf76c8ff32f02ed9cf 100644 (file)
@@ -16,8 +16,6 @@ struct dev_archdata {
 
        struct device_node      *prom_node;
        struct of_device        *op;
-
-       unsigned int            msi_num;
 };
 
 #endif /* _ASM_SPARC64_DEVICE_H */
index e6c436ef9356c3b0c3a7e7e1dec455e4d06c798d..c00ad152771b04f7787310d4b7300c2ce7da8032 100644 (file)
 #include <asm/ptrace.h>
 
 /* IMAP/ICLR register defines */
-#define IMAP_VALID             0x80000000      /* IRQ Enabled          */
-#define IMAP_TID_UPA           0x7c000000      /* UPA TargetID         */
-#define IMAP_TID_JBUS          0x7c000000      /* JBUS TargetID        */
+#define IMAP_VALID             0x80000000UL    /* IRQ Enabled          */
+#define IMAP_TID_UPA           0x7c000000UL    /* UPA TargetID         */
+#define IMAP_TID_JBUS          0x7c000000UL    /* JBUS TargetID        */
 #define IMAP_TID_SHIFT         26
-#define IMAP_AID_SAFARI                0x7c000000      /* Safari AgentID       */
+#define IMAP_AID_SAFARI                0x7c000000UL    /* Safari AgentID       */
 #define IMAP_AID_SHIFT         26
-#define IMAP_NID_SAFARI                0x03e00000      /* Safari NodeID        */
+#define IMAP_NID_SAFARI                0x03e00000UL    /* Safari NodeID        */
 #define IMAP_NID_SHIFT         21
-#define IMAP_IGN               0x000007c0      /* IRQ Group Number     */
-#define IMAP_INO               0x0000003f      /* IRQ Number           */
-#define IMAP_INR               0x000007ff      /* Full interrupt number*/
+#define IMAP_IGN               0x000007c0UL    /* IRQ Group Number     */
+#define IMAP_INO               0x0000003fUL    /* IRQ Number           */
+#define IMAP_INR               0x000007ffUL    /* Full interrupt number*/
 
-#define ICLR_IDLE              0x00000000      /* Idle state           */
-#define ICLR_TRANSMIT          0x00000001      /* Transmit state       */
-#define ICLR_PENDING           0x00000003      /* Pending state        */
+#define ICLR_IDLE              0x00000000UL    /* Idle state           */
+#define ICLR_TRANSMIT          0x00000001UL    /* Transmit state       */
+#define ICLR_PENDING           0x00000003UL    /* Pending state        */
 
 /* The largest number of unique interrupt sources we support.
  * If this needs to ever be larger than 255, you need to change
@@ -53,6 +53,9 @@ extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
 extern void sun4v_destroy_msi(unsigned int virt_irq);
 extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
 
+extern void sparc64_set_msi(unsigned int virt_irq, u32 msi);
+extern u32 sparc64_get_msi(unsigned int virt_irq);
+
 extern void fixup_irqs(void);
 
 static __inline__ void set_softint(unsigned long bits)
index e3f010bd12b3cbaa9f724f768a884b1ae3021f3e..cb0248616d49d75ef1ef255210ac393b84b44018 100644 (file)
 
   . = ALIGN(4096);
   .note : { *(.note.*) }
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
+  __ex_table : {
+       __start___ex_table = .;
+       *(__ex_table)
+       __stop___ex_table = .;
+  }
 
   BUG_TABLE
 
-  __uml_setup_start = .;
-  .uml.setup.init : { *(.uml.setup.init) }
-  __uml_setup_end = .;
+  .uml.setup.init : {
+       __uml_setup_start = .;
+       *(.uml.setup.init)
+       __uml_setup_end = .;
+  }
        
-  __uml_help_start = .;
-  .uml.help.init : { *(.uml.help.init) }
-  __uml_help_end = .;
+  .uml.help.init : {
+       __uml_help_start = .;
+       *(.uml.help.init)
+       __uml_help_end = .;
+  }
        
-  __uml_postsetup_start = .;
-  .uml.postsetup.init : { *(.uml.postsetup.init) }
-  __uml_postsetup_end = .;
+  .uml.postsetup.init : {
+       __uml_postsetup_start = .;
+       *(.uml.postsetup.init)
+       __uml_postsetup_end = .;
+  }
        
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
+  .init.setup : {
+       __setup_start = .;
+       *(.init.setup)
+       __setup_end = .;
+  }
 
   . = ALIGN(32);
-  __per_cpu_start = . ; 
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = . ;
+  .data.percpu : {
+       __per_cpu_start = . ;
+       *(.data.percpu)
+       __per_cpu_end = . ;
+  }
        
-  __initcall_start = .;
   .initcall.init : {
+       __initcall_start = .;
        INITCALLS
+       __initcall_end = .;
   }
-  __initcall_end = .;
 
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
+  .con_initcall.init : {
+       __con_initcall_start = .;
+       *(.con_initcall.init)
+       __con_initcall_end = .;
+  }
 
-  __uml_initcall_start = .;
-  .uml.initcall.init : { *(.uml.initcall.init) }
-  __uml_initcall_end = .;
+  .uml.initcall.init : {
+       __uml_initcall_start = .;
+       *(.uml.initcall.init)
+       __uml_initcall_end = .;
+  }
   __init_end = .;
 
   SECURITY_INIT
 
-  __exitcall_begin = .;
-  .exitcall : { *(.exitcall.exit) }
-  __exitcall_end = .;
+  .exitcall : {
+       __exitcall_begin = .;
+       *(.exitcall.exit)
+       __exitcall_end = .;
+  }
 
-  __uml_exitcall_begin = .;
-  .uml.exitcall : { *(.uml.exitcall.exit) }
-  __uml_exitcall_end = .;
+  .uml.exitcall : {
+       __uml_exitcall_begin = .;
+       *(.uml.exitcall.exit)
+       __uml_exitcall_end = .;
+  }
 
   . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
+  .altinstructions : {
+       __alt_instructions = .;
+       *(.altinstructions)
+       __alt_instructions_end = .;
+  }
   .altinstr_replacement : { *(.altinstr_replacement) }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
   .exit.text : { *(.exit.text) }
   .exit.data : { *(.exit.data) }
 
-  __preinit_array_start = .;
-  .preinit_array : { *(.preinit_array) }
-  __preinit_array_end = .;
-  __init_array_start = .;
-  .init_array : { *(.init_array) }
-  __init_array_end = .;
-  __fini_array_start = .;
-  .fini_array : { *(.fini_array) }
-  __fini_array_end = .;
+  .preinit_array : {
+       __preinit_array_start = .;
+       *(.preinit_array)
+       __preinit_array_end = .;
+  }
+  .init_array : {
+       __init_array_start = .;
+       *(.init_array)
+       __init_array_end = .;
+  }
+  .fini_array : {
+       __fini_array_start = .;
+       *(.fini_array)
+       __fini_array_end = .;
+  }
 
    . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
+  .init.ramfs : {
+       __initramfs_start = .;
+       *(.init.ramfs)
+       __initramfs_end = .;
+  }
 
   /* Sections to be discarded */
   /DISCARD/ : {
index 8a8246d039363f0bd60c43584050de3e620caa1f..bfe27aa2c9c461862e79df34b12824cb906e778b 100644 (file)
@@ -6,7 +6,9 @@
 #ifndef __UM_ELF_X86_64_H
 #define __UM_ELF_X86_64_H
 
+#include <linux/sched.h>
 #include <asm/user.h>
+#include "skas.h"
 
 /* x86-64 relocation types, taken from asm-x86_64/elf.h */
 #define R_X86_64_NONE          0       /* No reloc */
@@ -64,6 +66,44 @@ typedef struct { } elf_fpregset_t;
        PT_REGS_R15(regs) = 0; \
 } while (0)
 
+#define ELF_CORE_COPY_REGS(pr_reg, regs)               \
+       (pr_reg)[0] = (regs)->regs.gp[0];                       \
+       (pr_reg)[1] = (regs)->regs.gp[1];                       \
+       (pr_reg)[2] = (regs)->regs.gp[2];                       \
+       (pr_reg)[3] = (regs)->regs.gp[3];                       \
+       (pr_reg)[4] = (regs)->regs.gp[4];                       \
+       (pr_reg)[5] = (regs)->regs.gp[5];                       \
+       (pr_reg)[6] = (regs)->regs.gp[6];                       \
+       (pr_reg)[7] = (regs)->regs.gp[7];                       \
+       (pr_reg)[8] = (regs)->regs.gp[8];                       \
+       (pr_reg)[9] = (regs)->regs.gp[9];                       \
+       (pr_reg)[10] = (regs)->regs.gp[10];                     \
+       (pr_reg)[11] = (regs)->regs.gp[11];                     \
+       (pr_reg)[12] = (regs)->regs.gp[12];                     \
+       (pr_reg)[13] = (regs)->regs.gp[13];                     \
+       (pr_reg)[14] = (regs)->regs.gp[14];                     \
+       (pr_reg)[15] = (regs)->regs.gp[15];                     \
+       (pr_reg)[16] = (regs)->regs.gp[16];                     \
+       (pr_reg)[17] = (regs)->regs.gp[17];                     \
+       (pr_reg)[18] = (regs)->regs.gp[18];                     \
+       (pr_reg)[19] = (regs)->regs.gp[19];                     \
+       (pr_reg)[20] = (regs)->regs.gp[20];                     \
+       (pr_reg)[21] = current->thread.arch.fs;                 \
+       (pr_reg)[22] = 0;                                       \
+       (pr_reg)[23] = 0;                                       \
+       (pr_reg)[24] = 0;                                       \
+       (pr_reg)[25] = 0;                                       \
+       (pr_reg)[26] = 0;
+
+static inline int elf_core_copy_fpregs(struct task_struct *t,
+                                      elf_fpregset_t *fpu)
+{
+       int cpu = current_thread->cpu;
+       return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+}
+
+#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
+
 #ifdef TIF_IA32 /* XXX */
 #error XXX, indeed
         clear_thread_flag(TIF_IA32);
index 23a22df039d8c64664b9f85582e16594a600b16a..c043c1ccf1c5098530bfef18941db54b716131b7 100644 (file)
@@ -73,6 +73,19 @@ enum {
        ATA_PIO5                = ATA_PIO4 | (1 << 5),
        ATA_PIO6                = ATA_PIO5 | (1 << 6),
 
+       ATA_SWDMA0              = (1 << 0),
+       ATA_SWDMA1              = ATA_SWDMA0 | (1 << 1),
+       ATA_SWDMA2              = ATA_SWDMA1 | (1 << 2),
+
+       ATA_SWDMA2_ONLY         = (1 << 2),
+
+       ATA_MWDMA0              = (1 << 0),
+       ATA_MWDMA1              = ATA_MWDMA0 | (1 << 1),
+       ATA_MWDMA2              = ATA_MWDMA1 | (1 << 2),
+
+       ATA_MWDMA12_ONLY        = (1 << 1) | (1 << 2),
+       ATA_MWDMA2_ONLY         = (1 << 2),
+
        ATA_UDMA0               = (1 << 0),
        ATA_UDMA1               = ATA_UDMA0 | (1 << 1),
        ATA_UDMA2               = ATA_UDMA1 | (1 << 2),
index 1d5ded0836eee2161221b25d3ef7b6f432fd0095..0ad72c4cf312a8bb9dc18bb6f39ad54b36fac941 100644 (file)
@@ -126,16 +126,16 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 static inline int cpu_is_offline(int cpu) { return 0; }
 #endif         /* CONFIG_HOTPLUG_CPU */
 
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
 extern int suspend_cpu_hotplug;
 
 extern int disable_nonboot_cpus(void);
 extern void enable_nonboot_cpus(void);
-#else
+#else /* !CONFIG_PM_SLEEP_SMP */
 #define suspend_cpu_hotplug    0
 
 static inline int disable_nonboot_cpus(void) { return 0; }
 static inline void enable_nonboot_cpus(void) {}
-#endif
+#endif /* !CONFIG_PM_SLEEP_SMP */
 
 #endif /* _LINUX_CPU_H_ */
index e6a71c82d204699b6a18226e261a7beda4463ed7..3a19b032c0eb6ecfe887fad15b2adaddf3969b83 100644 (file)
@@ -66,11 +66,8 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
-                                               pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
@@ -78,8 +75,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
        return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
-                                               pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
+#define prepare_hugepage_range(addr,len)       (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
index 41978a5573187c3b4619fd842366ad90f226f97e..a67bb9075e9bbd0023a42e2ede2ed8b0fa601c52 100644 (file)
@@ -303,6 +303,7 @@ enum {
        ATA_HORKAGE_NODMA       = (1 << 1),     /* DMA problems */
        ATA_HORKAGE_NONCQ       = (1 << 2),     /* Don't use NCQ */
        ATA_HORKAGE_MAX_SEC_128 = (1 << 3),     /* Limit max sects to 128 */
+       ATA_HORKAGE_BROKEN_HPA  = (1 << 4),     /* Broken HPA */
 };
 
 enum hsm_task_states {
index 06d23e10a16d4679ae860c63fedf566de4555bce..17168f3cc73fdf0d85db76e0beb50385033d32d9 100644 (file)
 #define PCI_DEVICE_ID_ATI_IXP400_SATA   0x4379
 #define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a
 #define PCI_DEVICE_ID_ATI_IXP600_SATA  0x4380
-#define PCI_DEVICE_ID_ATI_IXP600_SMBUS 0x4385
+#define PCI_DEVICE_ID_ATI_SBX00_SMBUS  0x4385
 #define PCI_DEVICE_ID_ATI_IXP600_IDE   0x438c
 #define PCI_DEVICE_ID_ATI_IXP700_SATA  0x4390
-#define PCI_DEVICE_ID_ATI_IXP700_SMBUS 0x4395
 #define PCI_DEVICE_ID_ATI_IXP700_IDE   0x439c
 
 #define PCI_VENDOR_ID_VLSI             0x1004
index c91476ce314ac32853cd8cac29859ad3790ee975..dff3192374f8f0af665715a12e309d471ae70b97 100644 (file)
@@ -351,6 +351,8 @@ enum
 #define RTAX_INITCWND RTAX_INITCWND
        RTAX_FEATURES,
 #define RTAX_FEATURES RTAX_FEATURES
+       RTAX_RTO_MIN,
+#define RTAX_RTO_MIN RTAX_RTO_MIN
        __RTAX_MAX
 };
 
index bd6a0320a770bb9b3123bd57e517136a162165d1..f4e324ed2e4478e5282ba3b4c0f33339fc581dd8 100644 (file)
@@ -904,6 +904,7 @@ struct sched_entity {
 
        u64                     exec_start;
        u64                     sum_exec_runtime;
+       u64                     prev_sum_exec_runtime;
        u64                     wait_start_fair;
        u64                     sleep_start_fair;
 
index 124270df8734eb2b20e3f36adb115bc356692a15..74962077f63230eebfb40e723f2876c2922c3a2b 100644 (file)
@@ -78,7 +78,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(size_t size)
+static __always_inline int kmalloc_index(size_t size)
 {
        if (!size)
                return 0;
@@ -133,7 +133,7 @@ static inline int kmalloc_index(size_t size)
  * This ought to end up with a global pointer to the right cache
  * in kmalloc_caches.
  */
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 {
        int index = kmalloc_index(size);
 
@@ -166,7 +166,7 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);
@@ -183,7 +183,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);
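
The slub_def.h hunk above only upgrades inline to __always_inline on the kmalloc fast-path helpers. Those helpers are written to be folded away when the requested size is a compile-time constant (note the __builtin_constant_p(size) guards), and forcing inlining preserves that fold even when GCC's heuristics would otherwise emit kmalloc_index()'s long if-chain as an out-of-line function. A small illustrative call, not from the patch:

	/* With a constant size, kmalloc_index(256)/kmalloc_slab(256) should
	 * resolve at compile time to a fixed kmalloc_caches[] slot, leaving
	 * little more than the kmem_cache_alloc() call; without the forced
	 * inlining the whole if-chain could run at run time instead. */
	void *buf = kmalloc(256, GFP_KERNEL);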
index 73cb9943c8a887ef8afb61b59e243ce858427599..991c85bb9e3648e83cf44599d36845aa92bd9fc4 100644 (file)
@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
                                          const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
                                          const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
                              const struct sctp_chunk *,
                              const size_t hint);
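
The prototype change drops the payload pointer from sctp_init_cause(): it now writes only the error-cause header, and callers append the payload themselves with sctp_addto_chunk() or the new sctp_addto_param() declared in structs.h below. A minimal sketch of the call-site conversion, mirroring the sm_make_chunk.c hunks later in this merge (err stands for an already-built error/abort chunk):

	/* old: header and payload added in one call */
	sctp_init_cause(err, SCTP_ERROR_NO_DATA, &payload, sizeof(payload));

	/* new: header first, payload appended explicitly */
	sctp_init_cause(err, SCTP_ERROR_NO_DATA, sizeof(payload));
	sctp_addto_chunk(err, sizeof(payload), &payload);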
index ee4559b1130201e845548d07e12bd425cbb1349c..c0d5848c33dc1535688799b4020a2f9a7b52d586 100644 (file)
@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
                          struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void  *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
                                 const struct sctp_association *,
                                 struct sock *);
index 39ea3f442b47d161d2201384d6d1f147400fe8d9..cd33270e86dd61eb3b856cb6b0a61434d911f7bb 100644 (file)
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 #endif /* __sctp_ulpqueue_h__ */
 
 
index 181ae7086029e0b0eebb60cbc264b01b49f9297d..38033db8d8ec3d8f23af3973560dc995782efbda 100644 (file)
@@ -273,7 +273,7 @@ int __cpuinit cpu_up(unsigned int cpu)
        return err;
 }
 
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
@@ -334,4 +334,4 @@ void enable_nonboot_cpus(void)
 out:
        mutex_unlock(&cpu_add_remove_lock);
 }
-#endif
+#endif /* CONFIG_PM_SLEEP_SMP */
index 9578c1ae19ca9fde7a69e27fe617d3b928438637..06b24b3aa370c69577012023b12573d4c7624d19 100644 (file)
@@ -975,6 +975,7 @@ fastcall NORET_TYPE void do_exit(long code)
        if (unlikely(tsk->audit_context))
                audit_free(tsk);
 
+       tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);
 
        exit_mm(tsk);
@@ -996,7 +997,6 @@ fastcall NORET_TYPE void do_exit(long code)
        if (tsk->binfmt)
                module_put(tsk->binfmt->module);
 
-       tsk->exit_code = code;
        proc_exit_connector(tsk);
        exit_task_namespaces(tsk);
        exit_notify(tsk);
index 853aefbd184bfeec03783686abd92bf3ee0de36e..7230d914eaa28b86eaba939b5644c899d9872ce3 100644 (file)
@@ -547,14 +547,11 @@ int request_irq(unsigned int irq, irq_handler_t handler,
                 * We do this before actually registering it, to make sure that
                 * a 'real' IRQ doesn't run in parallel with our fake
                 */
-               if (irqflags & IRQF_DISABLED) {
-                       unsigned long flags;
+               unsigned long flags;
 
-                       local_irq_save(flags);
-                       handler(irq, dev_id);
-                       local_irq_restore(flags);
-               } else
-                       handler(irq, dev_id);
+               local_irq_save(flags);
+               handler(irq, dev_id);
+               local_irq_restore(flags);
        }
 #endif
 
index 412859f8d94abde2edd6248aa8db20764111cc6a..c8580a1e68739510088355cc5c37f4c47b571a77 100644 (file)
@@ -72,15 +72,10 @@ config PM_TRACE
        CAUTION: this option will cause your machine's real-time clock to be
        set to an invalid time after a resume.
 
-config SUSPEND_SMP_POSSIBLE
-       bool
-       depends on (X86 && !X86_VOYAGER) || (PPC64 && (PPC_PSERIES || PPC_PMAC))
-       depends on SMP
-       default y
-
-config SUSPEND_SMP
+config PM_SLEEP_SMP
        bool
-       depends on SUSPEND_SMP_POSSIBLE && PM_SLEEP
+       depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+       depends on PM_SLEEP
        select HOTPLUG_CPU
        default y
 
@@ -89,20 +84,46 @@ config PM_SLEEP
        depends on SUSPEND || HIBERNATION
        default y
 
+config SUSPEND_UP_POSSIBLE
+       bool
+       depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
+                  || SUPERH || FRV
+       depends on !SMP
+       default y
+
+config SUSPEND_SMP_POSSIBLE
+       bool
+       depends on (X86 && !X86_VOYAGER) \
+                  || (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
+       depends on SMP
+       default y
+
 config SUSPEND
        bool "Suspend to RAM and standby"
        depends on PM
-       depends on !SMP || SUSPEND_SMP_POSSIBLE
+       depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
        default y
        ---help---
          Allow the system to enter sleep states in which main memory is
          powered and thus its contents are preserved, such as the
          suspend-to-RAM state (i.e. the ACPI S3 state).
 
+config HIBERNATION_UP_POSSIBLE
+       bool
+       depends on X86 || PPC64_SWSUSP || FRV || PPC32
+       depends on !SMP
+       default y
+
+config HIBERNATION_SMP_POSSIBLE
+       bool
+       depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
+       depends on SMP
+       default y
+
 config HIBERNATION
        bool "Hibernation (aka 'suspend to disk')"
        depends on PM && SWAP
-       depends on ((X86 || PPC64_SWSUSP || FRV || PPC32) && !SMP) || SUSPEND_SMP_POSSIBLE
+       depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
        ---help---
          Enable the suspend to disk (STD) functionality, which is usually
          called "hibernation" in user interfaces.  STD checkpoints the
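
The net effect of this Kconfig rework is that sleep-time CPU hotplug now keys off PM_SLEEP_SMP rather than the old SUSPEND_SMP, with the per-architecture possibilities split into *_UP_POSSIBLE/*_SMP_POSSIBLE helper symbols. A worked resolution for one hypothetical configuration (SMP x86, not Voyager, PM_SLEEP enabled):

	CONFIG_SUSPEND_SMP_POSSIBLE=y   # (X86 && !X86_VOYAGER) && SMP
	CONFIG_PM_SLEEP_SMP=y           # SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE, plus PM_SLEEP
	CONFIG_HOTPLUG_CPU=y            # selected by PM_SLEEP_SMP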
index 9fe473a190deac015a0ead90149e0f498cd393be..b533d6db78aab0afd5840627e0b568797fb12fbd 100644 (file)
@@ -1587,6 +1587,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.wait_start_fair           = 0;
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
+       p->se.prev_sum_exec_runtime     = 0;
        p->se.delta_exec                = 0;
        p->se.delta_fair_run            = 0;
        p->se.delta_fair_sleep          = 0;
index ee3771850aaf1548e41bdcf224e95301d797a5e1..ce39282d9c0d221205fa793a3a20c77627b90905 100644 (file)
@@ -354,7 +354,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        delta_fair = calc_delta_fair(delta_exec, lw);
        delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
-       if (cfs_rq->sleeper_bonus > sysctl_sched_latency) {
+       if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
                delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
                delta = min(delta, (unsigned long)(
                        (long)sysctl_sched_runtime_limit - curr->wait_runtime));
@@ -489,6 +489,9 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        unsigned long delta_fair;
 
+       if (unlikely(!se->wait_start_fair))
+               return;
+
        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                        (u64)(cfs_rq->fair_clock - se->wait_start_fair));
 
@@ -668,7 +671,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void
+static int
 __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          struct sched_entity *curr, unsigned long granularity)
 {
@@ -679,8 +682,11 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
         * preempt the current task unless the best task has
         * a larger than sched_granularity fairness advantage:
         */
-       if (__delta > niced_granularity(curr, granularity))
+       if (__delta > niced_granularity(curr, granularity)) {
                resched_task(rq_of(cfs_rq)->curr);
+               return 1;
+       }
+       return 0;
 }
 
 static inline void
@@ -725,6 +731,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
+       unsigned long gran, ideal_runtime, delta_exec;
        struct sched_entity *next;
 
        /*
@@ -741,8 +748,22 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        if (next == curr)
                return;
 
-       __check_preempt_curr_fair(cfs_rq, next, curr,
-                       sched_granularity(cfs_rq));
+       gran = sched_granularity(cfs_rq);
+       ideal_runtime = niced_granularity(curr,
+               max(sysctl_sched_latency / cfs_rq->nr_running,
+                   (unsigned long)sysctl_sched_min_granularity));
+       /*
+        * If we executed more than what the latency constraint suggests,
+        * reduce the rescheduling granularity. This way the total latency
+        * of how much a task is not scheduled converges to
+        * sysctl_sched_latency:
+        */
+       delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+       if (delta_exec > ideal_runtime)
+               gran = 0;
+
+       if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
+               curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
 }
 
 /**************************************************
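
A short worked example of the new preemption test in entity_tick() (the numbers are illustrative, and niced_granularity() is treated as an identity for a nice-0 task, which is an assumption of this sketch):

	/*
	 * sysctl_sched_latency = 20 ms, sysctl_sched_min_granularity = 2 ms,
	 * cfs_rq->nr_running = 4:
	 *     ideal_runtime = max(20 ms / 4, 2 ms) = 5 ms
	 * If curr has run 6 ms since prev_sum_exec_runtime was last reset,
	 * delta_exec (6 ms) exceeds ideal_runtime (5 ms), so gran is forced
	 * to 0 and the next entity preempts as soon as its key is better;
	 * prev_sum_exec_runtime is then reset on the successful preemption.
	 */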
@@ -1076,31 +1097,34 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 static void task_new_fair(struct rq *rq, struct task_struct *p)
 {
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
-       struct sched_entity *se = &p->se;
+       struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
 
        sched_info_queued(p);
 
+       update_curr(cfs_rq);
        update_stats_enqueue(cfs_rq, se);
        /*
         * Child runs first: we let it run before the parent
         * until it reschedules once. We set up the key so that
         * it will preempt the parent:
         */
-       p->se.fair_key = current->se.fair_key -
-               niced_granularity(&rq->curr->se, sched_granularity(cfs_rq)) - 1;
+       se->fair_key = curr->fair_key -
+               niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
        /*
         * The first wait is dominated by the child-runs-first logic,
         * so do not credit it with that waiting time yet:
         */
        if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
-               p->se.wait_start_fair = 0;
+               se->wait_start_fair = 0;
 
        /*
         * The statistical average of wait_runtime is about
         * -granularity/2, so initialize the task with that:
         */
-       if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
-               p->se.wait_runtime = -(sched_granularity(cfs_rq) / 2);
+       if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+               se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
+               schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+       }
 
        __enqueue_entity(cfs_rq, se);
 }
index ad63109e413c1819804f326f50ff262dce9b171a..3169bed0b4d0e44166b37c48ed663d516a9ccba5 100644 (file)
@@ -1300,20 +1300,19 @@ struct sigqueue *sigqueue_alloc(void)
 void sigqueue_free(struct sigqueue *q)
 {
        unsigned long flags;
+       spinlock_t *lock = &current->sighand->siglock;
+
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * If the signal is still pending remove it from the
-        * pending queue.
+        * pending queue. We must hold ->siglock while testing
+        * q->list to serialize with collect_signal().
         */
-       if (unlikely(!list_empty(&q->list))) {
-               spinlock_t *lock = &current->sighand->siglock;
-               read_lock(&tasklist_lock);
-               spin_lock_irqsave(lock, flags);
-               if (!list_empty(&q->list))
-                       list_del_init(&q->list);
-               spin_unlock_irqrestore(lock, flags);
-               read_unlock(&tasklist_lock);
-       }
+       spin_lock_irqsave(lock, flags);
+       if (!list_empty(&q->list))
+               list_del_init(&q->list);
+       spin_unlock_irqrestore(lock, flags);
+
        q->flags &= ~SIGQUEUE_PREALLOC;
        __sigqueue_free(q);
 }
index 449b81b98b3db5b873301ee45f68605296f118c2..1b33b05d346bb8958ac235a2aa423b657cdf28ad 100644 (file)
@@ -1442,7 +1442,6 @@ asmlinkage long sys_times(struct tms __user * tbuf)
  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
  * LBT 04.03.94
  */
-
 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 {
        struct task_struct *p;
@@ -1470,7 +1469,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
        if (!thread_group_leader(p))
                goto out;
 
-       if (p->real_parent == group_leader) {
+       if (p->real_parent->tgid == group_leader->tgid) {
                err = -EPERM;
                if (task_session(p) != task_session(group_leader))
                        goto out;
index d055d987850cc96ed2a73cc36d064b15d552dcaf..85af9422ea6e0163afd849f59237752825a62406 100644 (file)
@@ -81,6 +81,7 @@ void free_user_ns(struct kref *kref)
        struct user_namespace *ns;
 
        ns = container_of(kref, struct user_namespace, kref);
+       free_uid(ns->root_user);
        kfree(ns);
 }
 
index 172abffeb2e386fe08a333051934b73fcd98707a..bb54b88c3d5aaab2752569f046129bea8082f4ec 100644 (file)
@@ -955,6 +955,11 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
                goto out;
        }
 
+       if (!nodes_subset(new, node_online_map)) {
+               err = -EINVAL;
+               goto out;
+       }
+
        err = security_task_movememory(task);
        if (err)
                goto out;
index 37c73b902008e1dd2083f71995f2ce67975f6512..e2fdbce1874b4ff7a481621b6c5a3285d8d75260 100644 (file)
@@ -611,6 +611,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
+       int rcu_locked = 0;
 
        if (!newpage)
                return -ENOMEM;
@@ -636,8 +637,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         * we cannot notice that anon_vma is freed while we migrates a page.
         * This rcu_read_lock() delays freeing anon_vma pointer until the end
         * of migration. File cache pages are no problem because of page_lock()
+        * File cache pages may use write_page() or lock_page() in migration,
+        * so only anonymous pages need this care here.
         */
-       rcu_read_lock();
+       if (PageAnon(page)) {
+               rcu_read_lock();
+               rcu_locked = 1;
+       }
        /*
         * This is a corner case handling.
         * When a new swap-cache is read into, it is linked to LRU
@@ -656,7 +662,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        if (rc)
                remove_migration_ptes(page, page);
 rcu_unlock:
-       rcu_read_unlock();
+       if (rcu_locked)
+               rcu_read_unlock();
 
 unlock:
 
index 6427653023aabfb501a2c34d3cc18f39b686c532..1a8c59571cb7303515c7b616f9572c4706ccebb6 100644 (file)
@@ -2345,6 +2345,8 @@ static int __cpuinit process_zones(int cpu)
        return 0;
 bad:
        for_each_zone(dzone) {
+               if (!populated_zone(dzone))
+                       continue;
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
index 04151da399c6ce8c344409c2ce7fbf289ac645e5..7defe84e6bd0bfec5e8c8f6b7833f2f2c0ea53ea 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3813,7 +3813,9 @@ static int __init slab_sysfs_init(void)
 
        list_for_each_entry(s, &slab_caches, list) {
                err = sysfs_slab_add(s);
-               BUG_ON(err);
+               if (err)
+                       printk(KERN_ERR "SLUB: Unable to add boot slab %s"
+                                               " to sysfs\n", s->name);
        }
 
        while (alias_list) {
@@ -3821,7 +3823,9 @@ static int __init slab_sysfs_init(void)
 
                alias_list = alias_list->next;
                err = sysfs_slab_alias(al->s, al->name);
-               BUG_ON(err);
+               if (err)
+                       printk(KERN_ERR "SLUB: Unable to add boot slab alias"
+                                       " %s to sysfs\n", s->name);
                kfree(al);
        }
 
index 69b70977f00061610b30fc86475e167bf49f4ee0..eb57502bb2641b7087b50a8c2577699f3b3ab388 100644 (file)
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
        if (hold_time(br) == 0)
                return;
 
+       /* ignore packets unless we are using this port */
+       if (!(source->state == BR_STATE_LEARNING ||
+             source->state == BR_STATE_FORWARDING))
+               return;
+
        fdb = fdb_find(head, addr);
        if (likely(fdb)) {
                /* attempt to update an entry for a local interface */
index 749f0e8f541d124f1975e6e6aa5a1e8d06ddb439..9272f12f664cfb6a4604e041652c824b30480abf 100644 (file)
  */
 static int port_cost(struct net_device *dev)
 {
-       if (dev->ethtool_ops->get_settings) {
-               struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-               int err = dev->ethtool_ops->get_settings(dev, &ecmd);
-               if (!err) {
+       if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+               struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+               if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
                        switch(ecmd.speed) {
-                       case SPEED_100:
-                               return 19;
-                       case SPEED_1000:
-                               return 4;
                        case SPEED_10000:
                                return 2;
+                       case SPEED_1000:
+                               return 4;
+                       case SPEED_100:
+                               return 19;
                        case SPEED_10:
                                return 100;
                        }
index 5c18595b7616602b7f987b80be7f3ce403617549..6f468fc3357a08f9c04bcbf4b22d93004521b3ac 100644 (file)
@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
        struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 
-       if (p && p->state != BR_STATE_DISABLED)
+       if (p)
                br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
        return 0;        /* process further */
 }
 
index 7bae576ac115bf19732d878ac7022d5baeb1f0ab..36fdea71d74230e541060322b784eb794fbb5f47 100644 (file)
@@ -380,7 +380,6 @@ struct pktgen_thread {
        /* Field for thread to receive "posted" events terminate, stop ifs etc. */
 
        u32 control;
-       int pid;
        int cpu;
 
        wait_queue_head_t queue;
@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
        }
 
        if ((netif_queue_stopped(odev) ||
-            netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
-            need_resched()) {
+            (pkt_dev->skb &&
+             netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+           need_resched()) {
                idle_start = getCurUs();
 
                if (!netif_running(odev)) {
@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg)
 
        init_waitqueue_head(&t->queue);
 
-       t->pid = current->pid;
-
        pr_debug("pktgen: starting pktgen/%d:  pid=%d\n", cpu, current->pid);
 
        max_before_softirq = t->max_before_softirq;
index 9785df37a65feeb285bc2f656eb828216de23585..1ee72127462bf37ed5ef834ebc71111b1cf070d5 100644 (file)
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                tcp_grow_window(sk, skb);
 }
 
+static u32 tcp_rto_min(struct sock *sk)
+{
+       struct dst_entry *dst = __sk_dst_get(sk);
+       u32 rto_min = TCP_RTO_MIN;
+
+       if (dst_metric_locked(dst, RTAX_RTO_MIN))
+               rto_min = dst->metrics[RTAX_RTO_MIN-1];
+       return rto_min;
+}
+
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                        if (tp->mdev_max < tp->rttvar)
                                tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
                        tp->rtt_seq = tp->snd_nxt;
-                       tp->mdev_max = TCP_RTO_MIN;
+                       tp->mdev_max = tcp_rto_min(sk);
                }
        } else {
                /* no previous measure. */
                tp->srtt = m<<3;        /* take the measured time to be rtt */
                tp->mdev = m<<1;        /* make sure rto = 3*rtt */
-               tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+               tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
                tp->rtt_seq = tp->snd_nxt;
        }
 }
index ab7d845224fce9f7dad1b92f3d6a91d9c7c16411..223f9bded672dcfc3e3ea01e130b37e9fa15e53d 100644 (file)
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
               void *matchinfo,
               unsigned int hook_mask)
 {
-       const struct xt_tcp *udpinfo = matchinfo;
+       const struct xt_udp *udpinfo = matchinfo;
 
        /* Must specify no unknown invflags */
        return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
index 4a49db65772e7c23d60214720d0db5ab4c844a59..abd82fc3ec609c40c9c804eb6d03906822bcfd6f 100644 (file)
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
        if (TC_H_MAJ(skb->priority) != sch->handle) {
                err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
-               switch (tc_classify(skb, q->filter_list, &res)) {
+               switch (err) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS;
index 498edb0cd4e5f7156cb98506304db1f48bcc8cef..2ad1caf1ea42577205a5a61f7e4494125542671b 100644 (file)
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                break;
 
        case SCTP_TRANSPORT_DOWN:
-               transport->state = SCTP_INACTIVE;
+               /* if the transport was never confirmed, do not transition it
+                * to inactive state.
+                */
+               if (transport->state != SCTP_UNCONFIRMED)
+                       transport->state = SCTP_INACTIVE;
+
                spc_state = SCTP_ADDR_UNREACHABLE;
                break;
 
index 992f361084b741115f3e14006bb00bb0772a4fcf..28f4fe77ceee2e44e9b2e29eefd2be632464c331 100644 (file)
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                 */
                if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
                   (!fast_retransmit && !chunk->tsn_gap_acked)) {
+                       /* If this chunk was sent less than 1 rto ago, do not
+                        * retransmit this chunk, but give the peer time
+                        * to acknowledge it.
+                        */
+                       if ((jiffies - chunk->sent_at) < transport->rto)
+                               continue;
+
                        /* RFC 2960 6.2.1 Processing a Received SACK
                         *
                         * C) Any time a DATA chunk is marked for
index 51c4d7fef1d23137c29fd30aabdeb261ae1e9d86..79856c9245252af44f4b9d793ec8a746fa5cc40f 100644 (file)
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void  sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-                     const void *payload, size_t paylen)
+                     size_t paylen)
 {
        sctp_errhdr_t err;
        __u16 len;
@@ -120,7 +120,6 @@ void  sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
        len = sizeof(sctp_errhdr_t) + paylen;
        err.length  = htons(len);
        chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-       sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
        /* Put the tsn back into network byte order.  */
        payload = htonl(tsn);
-       sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-                       sizeof(payload));
+       sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+       sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
                        goto err_copy;
        }
 
-       sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+       sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+       sctp_addto_chunk(retval, paylen, payload);
 
        if (paylen)
                kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
        struct sctp_paramhdr phdr;
 
        retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-                                       + sizeof(sctp_chunkhdr_t));
+                                       + sizeof(sctp_paramhdr_t));
        if (!retval)
                goto end;
 
-       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+                                       + sizeof(sctp_paramhdr_t));
 
        phdr.type = htons(chunk->chunk_hdr->type);
        phdr.length = chunk->chunk_hdr->length;
-       sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+       sctp_addto_chunk(retval, paylen, payload);
+       sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
        return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
        if (!retval)
                goto nodata;
 
-       sctp_init_cause(retval, cause_code, payload, paylen);
+       sctp_init_cause(retval, cause_code, paylen);
+       sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
        return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
        void *target;
        void *padding;
        int chunklen = ntohs(chunk->chunk_hdr->length);
-       int padlen = chunklen % 4;
+       int padlen = WORD_ROUND(chunklen) - chunklen;
 
        padding = skb_put(chunk->skb, padlen);
        target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
        return target;
 }
 
+/* Append bytes to the end of a parameter.  Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+       void *target;
+       int chunklen = ntohs(chunk->chunk_hdr->length);
+
+       target = skb_put(chunk->skb, len);
+
+       memcpy(target, data, len);
+
+       /* Adjust the chunk length field.  */
+       chunk->chunk_hdr->length = htons(chunklen + len);
+       chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+       return target;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+       struct sctp_datamsg *msg;
+       struct sctp_chunk *lchunk;
+       struct sctp_stream *stream;
        __u16 ssn;
        __u16 sid;
 
        if (chunk->has_ssn)
                return;
 
-       /* This is the last possible instant to assign a SSN. */
-       if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-               ssn = 0;
-       } else {
-               sid = ntohs(chunk->subh.data_hdr->stream);
-               if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-                       ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-               else
-                       ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-       }
+       /* All fragments will be on the same stream */
+       sid = ntohs(chunk->subh.data_hdr->stream);
+       stream = &chunk->asoc->ssnmap->out;
 
-       chunk->subh.data_hdr->ssn = htons(ssn);
-       chunk->has_ssn = 1;
+       /* Now assign the sequence number to the entire message.
+        * All fragments must have the same stream sequence number.
+        */
+       msg = chunk->msg;
+       list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+               if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+                       ssn = 0;
+               } else {
+                       if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+                               ssn = sctp_ssn_next(stream, sid);
+                       else
+                               ssn = sctp_ssn_peek(stream, sid);
+               }
+
+               lchunk->subh.data_hdr->ssn = htons(ssn);
+               lchunk->has_ssn = 1;
+       }
 }
 
 /* Helper function to assign a TSN if needed.  This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
                        __be32 n = htonl(usecs);
 
                        sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-                                       &n, sizeof(n));
+                                       sizeof(n));
+                       sctp_addto_chunk(*errp, sizeof(n), &n);
                        *error = -SCTP_IERROR_STALE_COOKIE;
                } else
                        *error = -SCTP_IERROR_NOMEM;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
                report.num_missing = htonl(1);
                report.type = paramtype;
                sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-                               &report, sizeof(report));
+                               sizeof(report));
+               sctp_addto_chunk(*errp, sizeof(report), &report);
        }
 
        /* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
                *errp = sctp_make_op_error_space(asoc, chunk, 0);
 
        if (*errp)
-               sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+               sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
        /* Stop processing this chunk. */
        return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
                *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
        if (*errp) {
-               sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-                               sizeof(error));
-               sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+               sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+                               sizeof(error) + sizeof(sctp_paramhdr_t));
+               sctp_addto_chunk(*errp, sizeof(error), error);
+               sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
        }
 
        return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
        if (!*errp)
                *errp = sctp_make_op_error_space(asoc, chunk, len);
 
-       if (*errp)
-               sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-                               param.v, len);
+       if (*errp) {
+               sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+               sctp_addto_chunk(*errp, len, param.v);
+       }
 
        /* Stop processing this chunk. */
        return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
                        *errp = sctp_make_op_error_space(asoc, chunk,
                                        ntohs(chunk->chunk_hdr->length));
 
-               if (*errp)
+               if (*errp) {
                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       param.v,
                                        WORD_ROUND(ntohs(param.p->length)));
+                       sctp_addto_chunk(*errp,
+                                       WORD_ROUND(ntohs(param.p->length)),
+                                       param.v);
+               }
 
                break;
        case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 
                if (*errp) {
                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       param.v,
                                        WORD_ROUND(ntohs(param.p->length)));
+                       sctp_addto_chunk(*errp,
+                                       WORD_ROUND(ntohs(param.p->length)),
+                                       param.v);
                } else {
                        /* If there is no memory for generating the ERROR
                         * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
         * VIOLATION error.  We build the ERROR chunk here and let the normal
         * error handling code build and send the packet.
         */
-       if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+       if (param.v != (void*)chunk->chunk_end) {
                sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
                return 0;
        }
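
A detail worth noting about the two append helpers used throughout these hunks: sctp_addto_chunk() first pads the chunk out to a 4-byte boundary (WORD_ROUND) before appending, while the new sctp_addto_param() appends with no padding, which appears to be why the trailing parameter header in sctp_make_abort_violation() and sctp_process_inv_paramlength() is added with the latter. A minimal sketch of that sequence, following the hunks above:

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION,
			paylen + sizeof(sctp_paramhdr_t));
	sctp_addto_chunk(retval, paylen, payload);	/* may insert pad bytes first */
	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);	/* appended as-is */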
index d9fad4f6ffc3f592c7feed6af33b7bb49edb94e1..8d7890083493d9619c7899311bf9afb9ab6eb195 100644 (file)
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                break;
 
        case SCTP_DISPOSITION_VIOLATION:
-               printk(KERN_ERR "sctp protocol violation state %d "
-                      "chunkid %d\n", state, subtype.chunk);
+               if (net_ratelimit())
+                       printk(KERN_ERR "sctp protocol violation state %d "
+                              "chunkid %d\n", state, subtype.chunk);
                break;
 
        case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        /* Move the Cumulative TSN Ack ahead. */
                        sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+                       /* purge the fragmentation queue */
+                       sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
                        /* Abort any in progress partial delivery. */
                        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
                        break;
index 71cad56dd73fe62e0e1422f23cd7eb56c5f31f70..177528ed3e1b1d9f50ea2c9be844d5a5d0553187 100644 (file)
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
        struct sctp_chunk *err_chunk;
        struct sctp_packet *packet;
        sctp_unrecognized_param_t *unk_param;
-       struct sock *sk;
        int len;
 
        /* 6.10 Bundling
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
        if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
                return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
-       sk = ep->base.sk;
-       /* If the endpoint is not listening or if the number of associations
-        * on the TCP-style socket exceed the max backlog, respond with an
-        * ABORT.
-        */
-       if (!sctp_sstate(sk, LISTENING) ||
-           (sctp_style(sk, TCP) &&
-            sk_acceptq_is_full(sk)))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev, *ai_ev = NULL;
        int error = 0;
        struct sctp_chunk *err_chk_p;
+       struct sock *sk;
 
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
                return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+       /* If the endpoint is not listening or if the number of associations
+        * on the TCP-style socket exceed the max backlog, respond with an
+        * ABORT.
+        */
+       sk = ep->base.sk;
+       if (!sctp_sstate(sk, LISTENING) ||
+           (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
        /* "Decode" the chunk.  We have no optional parameters so we
         * are in good shape.
         */
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
        /* This should never happen, but lets log it if so.  */
        if (unlikely(!link)) {
                if (from_addr.sa.sa_family == AF_INET6) {
-                       printk(KERN_WARNING
-                              "%s association %p could not find address "
-                              NIP6_FMT "\n",
-                              __FUNCTION__,
-                              asoc,
-                              NIP6(from_addr.v6.sin6_addr));
+                       if (net_ratelimit())
+                               printk(KERN_WARNING
+                                   "%s association %p could not find address "
+                                   NIP6_FMT "\n",
+                                   __FUNCTION__,
+                                   asoc,
+                                   NIP6(from_addr.v6.sin6_addr));
                } else {
-                       printk(KERN_WARNING
-                              "%s association %p could not find address "
-                              NIPQUAD_FMT "\n",
-                              __FUNCTION__,
-                              asoc,
-                              NIPQUAD(from_addr.v4.sin_addr.s_addr));
+                       if (net_ratelimit())
+                               printk(KERN_WARNING
+                                   "%s association %p could not find address "
+                                   NIPQUAD_FMT "\n",
+                                   __FUNCTION__,
+                                   asoc,
+                                   NIPQUAD(from_addr.v4.sin_addr.s_addr));
                }
                return SCTP_DISPOSITION_DISCARD;
        }
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                abort = sctp_make_abort(asoc, asconf_ack,
                                        sizeof(sctp_errhdr_t));
                if (abort) {
-                       sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+                       sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(abort));
                }
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                abort = sctp_make_abort(asoc, asconf_ack,
                                        sizeof(sctp_errhdr_t));
                if (abort) {
-                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(abort));
                }
index 01c6364245b7081ec1224ef10c8434eebdb971ed..33354602ae86ef4b888bb534778f1d50d75af8b0 100644 (file)
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
         * The function sctp_get_port_local() does duplicate address
         * detection.
         */
+       addr->v4.sin_port = htons(snum);
        if ((ret = sctp_get_port_local(sk, addr))) {
                if (ret == (long) sk) {
                        /* This endpoint has a conflicting address. */
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
+               return 0;
        }
 
        /* Return if we are already listening. */
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
+               return 0;
        }
 
        if (sctp_sstate(sk, LISTENING))
index 34eb977a204d62b564d4cc76d3f2de1a382b723b..fa0ba2a5564e51c8a6f78aa6d40481fc80f52a02 100644 (file)
@@ -659,6 +659,46 @@ done:
        return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+       struct sk_buff *pos, *tmp;
+       struct sctp_ulpevent *event;
+       __u32 tsn;
+
+       if (skb_queue_empty(&ulpq->reasm))
+               return;
+
+       skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+               event = sctp_skb2event(pos);
+               tsn = event->tsn;
+
+               /* Since the entire message must be abandoned by the
+                * sender (item A3 in Section 3.5, RFC 3758), we can
+                * free all fragments on the list that are less than
+                * or equal to ctsn_point
+                */
+               if (TSN_lte(tsn, fwd_tsn)) {
+                       __skb_unlink(pos, &ulpq->reasm);
+                       sctp_ulpevent_free(event);
+               } else
+                       break;
+       }
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
                csid = cevent->stream;
                cssn = cevent->ssn;
 
-               if (cssn != sctp_ssn_peek(in, csid))
+               /* Have we gone too far?  */
+               if (csid > sid)
                        break;
 
-               /* Found it, so mark in the ssnmap. */
-               sctp_ssn_next(in, csid);
+               /* Have we not gone far enough?  */
+               if (csid < sid)
+                       continue;
+
+               /* see if this ssn has been marked by skipping */
+               if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+                       break;
 
                __skb_unlink(pos, &ulpq->lobby);
-               if (!event) {
+               if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);
-                       __skb_queue_tail(&temp, sctp_event2skb(event));
-               } else {
-                       /* Attach all gathered skbs to the event.  */
-                       __skb_queue_tail(&temp, pos);
-               }
+
+               /* Attach all gathered skbs to the event.  */
+               __skb_queue_tail(&temp, pos);
        }
 
        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * very first SKB on the 'temp' list.
         */
-       if (event)
+       if (event) {
+               /* see if we have more ordered data that we can deliver */
+               sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
+       }
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
        struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
-       sctp_ulpq_reap_ordered(ulpq);
+       sctp_ulpq_reap_ordered(ulpq, sid);
        return;
 }
 