* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
- cxgb3 engine microcode load
cxgb3 - Fix dev->priv usage
qeth: Drop ARP packets on HiperSockets interface with NOARP attribute.
qeth: provide specific message for OSA-adapters exclusively used
qeth: fix crash during reboot after failed online setting
qeth: Announce tx checksumming for qeth devices in TSO/EDDP mode
qeth: don't return the return values of void functions.
qeth: enforce a rate limit for inbound scatter gather messages
qeth: ungrouping a device must not be interruptible
netxen: fix crashes during module unload
netxen: Avoid firmware load in PCI probe
PS3: fix a hang on 'ifconfig down'
IOC3: Program UART predividers.
Datasheet: Publicly available at the Intel website
* ServerWorks OSB4, CSB5, CSB6 and HT-1000 southbridges
Datasheet: Only available via NDA from ServerWorks
- * ATI IXP200, IXP300, IXP400, SB600 and SB700 southbridges
+ * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
Datasheet: Not publicly available
* Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
Datasheet: Publicly available at the SMSC website http://www.smsc.com
or does something very odd once a month, document it.
PLEASE remember that submissions must be made under the terms
- of the OSDL certificate of contribution
- (http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html)
- and should include a Signed-off-by: line.
+ of the OSDL certificate of contribution and should include a
+ Signed-off-by: line. The current version of this "Developer's
+ Certificate of Origin" (DCO) is listed in the file
+ Documentation/SubmittingPatches.
6. Make sure you have the right to send any changes you make. If you
do changes at work you may find your employer owns the patch
W: http://ipw2200.sourceforge.net
S: Supported
-IOC3 DRIVER
+IOC3 ETHERNET DRIVER
P: Ralf Baechle
M: ralf@linux-mips.org
L: linux-mips@linux-mips.org
S: Maintained
+IOC3 SERIAL DRIVER
+P: Pat Gefre
+M: pfg@sgi.com
+L: linux-mips@linux-mips.org
+S: Maintained
+
IP MASQUERADING:
P: Juanjo Ciarlante
M: jjciarla@raiz.uncu.edu.ar
L: netem@lists.linux-foundation.org
S: Maintained
+NETERION (S2IO) Xframe 10GbE DRIVER
+P: Ramkrishna Vepa
+M: ram.vepa@neterion.com
+P: Rastapur Santosh
+M: santosh.rastapur@neterion.com
+P: Sivakumar Subramani
+M: sivakumar.subramani@neterion.com
+P: Sreenivasa Honnur
+M: sreenivasa.honnur@neterion.com
+L: netdev@vger.kernel.org
+W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
+S: Supported
+
NETFILTER/IPTABLES/IPCHAINS
P: Rusty Russell
P: Marc Boucher
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
-NETERION (S2IO) Xframe 10GbE DRIVER
-P: Ramkrishna Vepa
-M: ram.vepa@neterion.com
-P: Rastapur Santosh
-M: santosh.rastapur@neterion.com
-P: Sivakumar Subramani
-M: sivakumar.subramani@neterion.com
-P: Sreenivasa Honnur
-M: sreenivasa.honnur@neterion.com
-L: netdev@vger.kernel.org
-W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
-S: Supported
-
OPENCORES I2C BUS DRIVER
P: Peter Korsgaard
M: jacmet@sunsite.dk
return -ENOMEM;
if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
+ if (prepare_hugepage_range(addr, len))
return -EINVAL;
return addr;
}
* Don't actually need to do any preparation, but need to make sure
* the address is in the right region.
*/
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
- if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
- return -EINVAL;
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
/* Handle MAP_FIXED */
if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
+ if (prepare_hugepage_range(addr, len))
return -EINVAL;
return addr;
}
.flags = IORESOURCE_IRQ,
},
};
- platform_device_register_simple("txx9spi", 0,
+ platform_device_register_simple("spi_txx9", 0,
res, ARRAY_SIZE(res));
}
void (*pre_handler)(unsigned int, void *, void *);
void *pre_handler_arg1;
void *pre_handler_arg2;
+
+ u32 msi;
};
+void sparc64_set_msi(unsigned int virt_irq, u32 msi)
+{
+ struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+ if (data)
+ data->msi = msi;
+}
+
+u32 sparc64_get_msi(unsigned int virt_irq)
+{
+ struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+ if (data)
+ return data->msi;
+ return 0xffffffff;
+}
+
static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
unsigned int real_irq = virt_to_real_irq(virt_irq);
if (likely(data)) {
unsigned long imap = data->imap;
- u32 tmp = upa_readq(imap);
+ unsigned long tmp = upa_readq(imap);
tmp &= ~IMAP_VALID;
upa_writeq(tmp, imap);
break;
}
if (devino >= msi_end)
- return 0;
+ return -ENOSPC;
sysino = sun4v_devino_to_sysino(devhandle, devino);
bucket = &ivector_table[sysino];
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
- prom_halt();
+ virt_irq_free(*virt_irq_p);
+ return -ENOMEM;
}
set_irq_chip_data(bucket->virt_irq, data);
sd->host_controller = pbm;
sd->prom_node = node;
sd->op = of_find_device_by_node(node);
- sd->msi_num = 0xffffffff;
sd = &sd->op->dev.archdata;
sd->iommu = pbm->iommu;
if (msi_num < 0)
return msi_num;
- devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
- pbm->msiq_first_devino,
- (pbm->msiq_first_devino +
- pbm->msiq_num));
- err = -ENOMEM;
- if (!devino)
+ err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
+ pbm->msiq_first_devino,
+ (pbm->msiq_first_devino +
+ pbm->msiq_num));
+ if (err < 0)
goto out_err;
+ devino = err;
msiqid = ((devino - pbm->msiq_first_devino) +
pbm->msiq_first);
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
goto out_err;
- pdev->dev.archdata.msi_num = msi_num;
+ sparc64_set_msi(*virt_irq_p, msi_num);
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
out_err:
free_msi(pbm, msi_num);
- sun4v_destroy_msi(*virt_irq_p);
- *virt_irq_p = 0;
return err;
}
unsigned long msiqid, err;
unsigned int msi_num;
- msi_num = pdev->dev.archdata.msi_num;
+ msi_num = sparc64_get_msi(virt_irq);
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
if (err) {
printk(KERN_ERR "%s: getmsiq gives error %lu\n",
return -ENOMEM;
if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
+ if (prepare_hugepage_range(addr, len))
return -EINVAL;
return addr;
}
*(.gnu.warning)
. = ALIGN(4096);
- __syscall_stub_start = .;
- *(.__syscall_stub*)
- __syscall_stub_end = .;
- . = ALIGN(4096);
} =0x90909090
+ . = ALIGN(4096);
+ .syscall_stub : {
+ __syscall_stub_start = .;
+ *(.__syscall_stub*)
+ __syscall_stub_end = .;
+ }
.fini : {
KEEP (*(.fini))
} =0x90909090
.got : { *(.got.plt) *(.got) }
_edata = .;
PROVIDE (edata = .);
- __bss_start = .;
.bss : {
+ __bss_start = .;
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
*(.gnu.linkonce.t*)
+ }
- . = ALIGN(4096);
- __syscall_stub_start = .;
- *(.__syscall_stub*)
- __syscall_stub_end = .;
- . = ALIGN(4096);
+ . = ALIGN(4096);
+ .syscall_stub : {
+ __syscall_stub_start = .;
+ *(.__syscall_stub*)
+ __syscall_stub_end = .;
}
#include "asm/common.lds.S"
*/
#include <errno.h>
+#include <sys/ptrace.h>
#include <string.h>
#include "ptrace_user.h"
#include "uml-config.h"
static unsigned long exec_regs[MAX_REG_NR];
static unsigned long exec_fp_regs[HOST_FP_SIZE];
+int save_fp_registers(int pid, unsigned long *fp_regs)
+{
+ if(ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
+ return -errno;
+ return 0;
+}
+
+int restore_fp_registers(int pid, unsigned long *fp_regs)
+{
+ if(ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
+ return -errno;
+ return 0;
+}
+
void init_thread_registers(union uml_pt_regs *to)
{
memcpy(to->skas.regs, exec_regs, sizeof(to->skas.regs));
return(instr == 0x050f);
}
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu )
-{
- panic("dump_fpu");
- return(1);
-}
-
int get_fpregs(unsigned long buf, struct task_struct *child)
{
panic("get_fpregs");
# CONFIG_PM_DEBUG is not set
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
-CONFIG_SUSPEND_SMP=y
#
# ACPI (Advanced Configuration and Power Interface) Support
#include <linux/libata.h>
#define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.12"
+#define DRV_VERSION "0.2.13"
/*
* A generic parallel ATA driver using libata
#include <linux/dmi.h>
#define DRV_NAME "ata_piix"
-#define DRV_VERSION "2.11"
+#define DRV_VERSION "2.12"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
ich6m_sata_ahci = 8,
ich8_sata_ahci = 9,
piix_pata_mwdma = 10, /* PIIX3 MWDMA only */
+ tolapai_sata_ahci = 11,
/* constants for mapping table */
P0 = 0, /* port 0 */
{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
/* SATA Controller IDE (ICH9M) */
{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+ /* SATA Controller IDE (Tolapai) */
+ { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
{ } /* terminate list */
};
},
};
+static const struct piix_map_db tolapai_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x3,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, NA, P1, NA }, /* 00b */
+ { RV, RV, RV, RV }, /* 01b */
+ { RV, RV, RV, RV }, /* 10b */
+ { RV, RV, RV, RV }, /* 11b */
+ },
+};
+
static const struct piix_map_db *piix_map_db_table[] = {
[ich5_sata] = &ich5_map_db,
[ich6_sata] = &ich6_map_db,
[ich6_sata_ahci] = &ich6_map_db,
[ich6m_sata_ahci] = &ich6m_map_db,
[ich8_sata_ahci] = &ich8_map_db,
+ [tolapai_sata_ahci] = &tolapai_map_db,
};
static struct ata_port_info piix_port_info[] = {
.mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
.port_ops = &piix_pata_ops,
},
+
+ /* tolapai_sata_ahci: 11: */
+ {
+ .sht = &piix_sht,
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+ PIIX_FLAG_AHCI,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
};
static struct pci_bits piix_enable_bits[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
},
},
+ {
+ .ident = "Satellite U200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+ },
+ },
{
.ident = "Satellite U205",
.matches = {
hpriv->map = map;
}
+static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+{
+ static struct dmi_system_id sysids[] = {
+ {
+ /* Clevo M570U sets IOCFG bit 18 if the cdrom
+ * isn't used to boot the system, which
+ * disables the channel.
+ */
+ .ident = "M570U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
+ },
+ },
+ };
+ u32 iocfg;
+
+ if (!dmi_check_system(sysids))
+ return;
+
+ /* The datasheet says that bit 18 is NOOP but certain systems
+ * seem to use it to disable a channel. Clear the bit on the
+ * affected systems.
+ */
+ pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg);
+ if (iocfg & (1 << 18)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "applying IOCFG bit18 quirk\n");
+ iocfg &= ~(1 << 18);
+ pci_write_config_dword(pdev, PIIX_IOCFG, iocfg);
+ }
+}
+
/**
* piix_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
piix_map_db_table[ent->driver_data]);
}
+ /* apply IOCFG bit18 quirk */
+ piix_iocfg_bit18_quirk(pdev);
+
/* On ICH5, some BIOSen disable the interrupt using the
* PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
* On ICH6, this bit has the same effect, but only when
dev->flags |= ATA_DFLAG_FLUSH_EXT;
}
- if (ata_id_hpa_enabled(dev->id))
- dev->n_sectors = ata_hpa_resize(dev);
+ if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
+ ata_id_hpa_enabled(dev->id))
+ dev->n_sectors = ata_hpa_resize(dev);
/* config NCQ */
ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
{ "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
{ "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
- /* Devices with NCQ limits */
+ /* devices which puke on READ_NATIVE_MAX */
+ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
/* End Marker */
{ }
tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ /* A clean abort indicates an original or just out-of-spec drive,
+ and we should continue as we issue the setup based on the
+ drive's reported working geometry */
+ if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ err_mask = 0;
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
#include <linux/dmi.h>
#define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.7.4"
+#define DRV_VERSION "0.7.5"
/*
* Cable special cases
#include <linux/libata.h>
#define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.3.9"
/**
* timing_setup - shared timing computation and load
#include <linux/libata.h>
#define DRV_NAME "pata_atiixp"
-#define DRV_VERSION "0.4.5"
+#define DRV_VERSION "0.4.6"
enum {
ATIIXP_IDE_PIO_TIMING = 0x40,
#include <linux/libata.h>
#define DRV_NAME "pata_cs5520"
-#define DRV_VERSION "0.6.5"
+#define DRV_VERSION "0.6.6"
struct pio_clocks
{
#include <linux/dmi.h>
#define DRV_NAME "pata_cs5530"
-#define DRV_VERSION "0.7.3"
+#define DRV_VERSION "0.7.4"
static void __iomem *cs5530_port_base(struct ata_port *ap)
{
#include <linux/libata.h>
#define DRV_NAME "pata_isapnp"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.2.2"
static struct scsi_host_template isapnp_sht = {
.module = THIS_MODULE,
#define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.7"
+#define DRV_VERSION "0.3.8"
struct it821x_dev
{
static const struct pci_device_id marvell_pci_tbl[] = {
{ PCI_DEVICE(0x11AB, 0x6101), },
+ { PCI_DEVICE(0x11AB, 0x6121), },
+ { PCI_DEVICE(0x11AB, 0x6123), },
{ PCI_DEVICE(0x11AB, 0x6145), },
{ } /* terminate list */
};
#define DRV_NAME "mpc52xx_ata"
-#define DRV_VERSION "0.1.0ac2"
+#define DRV_VERSION "0.1.2"
/* Private structures used by the driver */
#define DRV_NAME "pata_pcmcia"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.2"
/*
* Private data structure to glue stuff together
#include <linux/libata.h>
#define DRV_NAME "pata_pdc2027x"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "1.0"
#undef PDC_DEBUG
#ifdef PDC_DEBUG
#include <linux/pata_platform.h>
#define DRV_NAME "pata_platform"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.1"
static int pio_mask = 1;
#include <linux/libata.h>
#define DRV_NAME "sc1200"
-#define DRV_VERSION "0.2.5"
+#define DRV_VERSION "0.2.6"
#define SC1200_REV_A 0x00
#define SC1200_REV_B1 0x01
#include <linux/libata.h>
#define DRV_NAME "pata_scc"
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
#include <linux/libata.h>
#define DRV_NAME "pata_serverworks"
-#define DRV_VERSION "0.4.1"
+#define DRV_VERSION "0.4.2"
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
#include <linux/libata.h>
#define DRV_NAME "pata_sil680"
-#define DRV_VERSION "0.4.6"
+#define DRV_VERSION "0.4.7"
#define SIL680_MMIO_BAR 5
#include <linux/libata.h>
#define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.2"
enum {
/*
#include <linux/libata.h>
#define DRV_NAME "pdc_adma"
-#define DRV_VERSION "0.06"
+#define DRV_VERSION "1.0"
/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
#include <scsi/scsi_device.h>
#define DRV_NAME "sata_inic162x"
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
enum {
MMIO_BAR = 5,
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
-#define DRV_VERSION "0.81"
+#define DRV_VERSION "1.0"
enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "3.4"
+#define DRV_VERSION "3.5"
#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
-#define DRV_VERSION "2.09"
+#define DRV_VERSION "2.10"
enum {
PDC_MAX_PORTS = 4,
{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
- { PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
- { PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
+ { PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
+ { PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
#include <linux/libata.h>
#define DRV_NAME "sata_qstor"
-#define DRV_VERSION "0.08"
+#define DRV_VERSION "0.09"
enum {
QS_MMIO_BAR = 4,
#include <linux/libata.h>
#define DRV_NAME "sata_sil"
-#define DRV_VERSION "2.2"
+#define DRV_VERSION "2.3"
enum {
SIL_MMIO_BAR = 5,
#include <linux/libata.h>
#define DRV_NAME "sata_sil24"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "1.0"
/*
* Port request block (PRB) 32 bytes
#include "sis.h"
#define DRV_NAME "sata_sis"
-#define DRV_VERSION "0.8"
+#define DRV_VERSION "1.0"
enum {
sis_180 = 0,
#endif /* CONFIG_PPC_OF */
#define DRV_NAME "sata_svw"
-#define DRV_VERSION "2.2"
+#define DRV_VERSION "2.3"
enum {
/* ap->flags bits */
#include "sata_promise.h"
#define DRV_NAME "sata_sx4"
-#define DRV_VERSION "0.11"
+#define DRV_VERSION "0.12"
enum {
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "1.3"
enum {
uli_5289 = 0,
#include <linux/libata.h>
#define DRV_NAME "sata_via"
-#define DRV_VERSION "2.2"
+#define DRV_VERSION "2.3"
enum board_ids_enum {
vt6420,
#include <linux/libata.h>
#define DRV_NAME "sata_vsc"
-#define DRV_VERSION "2.2"
+#define DRV_VERSION "2.3"
enum {
VSC_MMIO_BAR = 0,
ATI IXP400
ATI SB600
ATI SB700
+ ATI SB800
Serverworks OSB4
Serverworks CSB5
Serverworks CSB6
Supports:
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000
- ATI IXP200, IXP300, IXP400, SB600, SB700
+ ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
SMSC Victory66
Note: we assume there can only be one device, with one SMBus interface.
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS),
.driver_data = 0 },
- { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SMBUS),
- .driver_data = 0 },
- { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SMBUS),
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS),
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4),
.driver_data = 0 },
/* Chip reset. */
REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+ /* Reading back any register after chip reset will hang the
+ * bus on 5706 A0 and A1. The msleep below provides plenty
+ * of margin for write posting.
+ */
if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ / 50);
- }
+ (CHIP_ID(bp) == CHIP_ID_5706_A1))
+ msleep(20);
/* Reset takes approximately 30 usec */
for (i = 0; i < 10; i++) {
/* enable IO_Space bit */
#define ITE_887x_POSIO_ENABLE (1 << 31)
-static int __devinit pci_ite887x_init(struct pci_dev *dev)
+static int pci_ite887x_init(struct pci_dev *dev)
{
/* inta_addr are the configuration addresses of the ITE */
static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0,
xfer->rx_dma = dma_map_single(dev,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(xfer->tx_dma)) {
+ if (dma_mapping_error(xfer->rx_dma)) {
if (xfer->tx_buf)
dma_unmap_single(dev,
xfer->tx_dma, xfer->len,
#define bfin5xx_spi_resume NULL
#endif /* CONFIG_PM */
+MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */
static struct platform_driver bfin5xx_spi_driver = {
- .driver = {
+ .driver = {
.name = "bfin-spi-master",
.owner = THIS_MODULE,
},
static struct platform_driver driver = {
.driver = {
- .name = "imx-spi",
+ .name = "spi_imx",
.bus = &platform_bus_type,
.owner = THIS_MODULE,
},
return 0;
}
+MODULE_ALIAS("mpc83xx_spi"); /* for platform bus hotplug */
static struct platform_driver mpc83xx_spi_driver = {
.probe = mpc83xx_spi_probe,
.remove = __devexit_p(mpc83xx_spi_remove),
#define s3c24xx_spi_resume NULL
#endif
+MODULE_ALIAS("s3c2410_spi"); /* for platform bus hotplug */
static struct platform_driver s3c24xx_spidrv = {
.probe = s3c24xx_spi_probe,
.remove = s3c24xx_spi_remove,
.suspend = s3c2410_spigpio_suspend,
.resume = s3c2410_spigpio_resume,
.driver = {
- .name = "s3c24xx-spi-gpio",
+ .name = "spi_s3c24xx_gpio",
.owner = THIS_MODULE,
},
};
static struct platform_driver txx9spi_driver = {
.remove = __exit_p(txx9spi_remove),
.driver = {
- .name = "txx9spi",
+ .name = "spi_txx9",
.owner = THIS_MODULE,
},
};
#include <syslib/virtex_devices.h>
-#define XILINX_SPI_NAME "xspi"
+#define XILINX_SPI_NAME "xilinx_spi"
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
* Product Specification", DS464
config FONT_8x16
bool "VGA 8x16 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE=y || STI_CONSOLE || USB_SISUSBVGA_CON
+ depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
default y if !SPARC && !FONTS
help
This is the "high resolution" font for the VGA frame buffer (the one
ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
goto out;
}
- if (special_file(lower_inode->i_mode)) {
- ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
- goto out;
- }
if (!nd) {
ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
"as we *think* we are about to unlink\n");
ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
return;
}
- lower_page->mapping->a_ops->sync_page(lower_page);
+ if (lower_page->mapping->a_ops->sync_page)
+ lower_page->mapping->a_ops->sync_page(lower_page);
ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
lower_page->index);
unlock_page(lower_page);
int ret;
/*
- * vma alignment has already been checked by prepare_hugepage_range.
- * If you add any error returns here, do so after setting VM_HUGETLB,
- * so is_vm_hugetlb_page tests below unmap_region go the right way
- * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+ * vma address alignment (but not the pgoff alignment) has
+ * already been checked by prepare_hugepage_range. If you add
+ * any error returns here, do so after setting VM_HUGETLB, so
+ * is_vm_hugetlb_page tests below unmap_region go the right
+ * way when do_mmap_pgoff unwinds (may be important on powerpc
+ * and ia64).
*/
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
+ if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+ return -EINVAL;
+
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
mutex_lock(&inode->i_mutex);
return -ENOMEM;
if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
+ if (prepare_hugepage_range(addr, len))
return -EINVAL;
return addr;
}
if (epos.offset + adsize > sb->s_blocksize) {
loffset = epos.offset;
aed->lengthAllocDescs = cpu_to_le32(adsize);
- sptr = UDF_I_DATA(inode) + epos.offset -
- udf_file_entry_alloc_offset(inode) +
- UDF_I_LENEATTR(inode) - adsize;
+ sptr = UDF_I_DATA(table) + epos.offset - adsize;
dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
memcpy(dptr, sptr, adsize);
epos.offset = sizeof(struct allocExtDesc) + adsize;
} else {
loffset = epos.offset + adsize;
aed->lengthAllocDescs = cpu_to_le32(0);
- sptr = oepos.bh->b_data + epos.offset;
- epos.offset = sizeof(struct allocExtDesc);
-
if (oepos.bh) {
+ sptr = oepos.bh->b_data + epos.offset;
aed = (struct allocExtDesc *)oepos.bh->b_data;
aed->lengthAllocDescs =
cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
} else {
+ sptr = UDF_I_DATA(table) + epos.offset;
UDF_I_LENALLOC(table) += adsize;
mark_inode_dirty(table);
}
+ epos.offset = sizeof(struct allocExtDesc);
}
if (UDF_SB_UDFREV(sb) >= 0x0200)
udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
static void udf_load_fileset(struct super_block *, struct buffer_head *,
kernel_lb_addr *);
-static void udf_load_partdesc(struct super_block *, struct buffer_head *);
+static int udf_load_partdesc(struct super_block *, struct buffer_head *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
root->logicalBlockNum, root->partitionReferenceNum);
}
-static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
{
struct partitionDesc *p;
int i;
UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
udf_iget(sb, loc);
+ if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) {
+ udf_debug("cannot load unallocSpaceTable (part %d)\n",
+ i);
+ return 1;
+ }
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
udf_iget(sb, loc);
+ if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) {
+ udf_debug("cannot load freedSpaceTable (part %d)\n",
+ i);
+ return 1;
+ }
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
udf_debug("freedSpaceTable (part %d) @ %ld\n",
i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
}
+ return 0;
}
static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
udf_load_logicalvol(sb, bh, fileset);
} else if (i == VDS_POS_PARTITION_DESC) {
struct buffer_head *bh2 = NULL;
- udf_load_partdesc(sb, bh);
+ if (udf_load_partdesc(sb, bh)) {
+ brelse(bh);
+ return 1;
+ }
for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) {
bh2 = udf_read_tagged(sb, j, j, &ident);
gd = (struct generic_desc *)bh2->b_data;
if (ident == TAG_IDENT_PD)
- udf_load_partdesc(sb, bh2);
+ if (udf_load_partdesc(sb, bh2)) {
+ brelse(bh);
+ brelse(bh2);
+ return 1;
+ }
brelse(bh2);
}
}
struct device_node *prom_node;
struct of_device *op;
-
- unsigned int msi_num;
};
#endif /* _ASM_SPARC64_DEVICE_H */
#include <asm/ptrace.h>
/* IMAP/ICLR register defines */
-#define IMAP_VALID 0x80000000 /* IRQ Enabled */
-#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */
-#define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */
+#define IMAP_VALID 0x80000000UL /* IRQ Enabled */
+#define IMAP_TID_UPA 0x7c000000UL /* UPA TargetID */
+#define IMAP_TID_JBUS 0x7c000000UL /* JBUS TargetID */
#define IMAP_TID_SHIFT 26
-#define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */
+#define IMAP_AID_SAFARI 0x7c000000UL /* Safari AgentID */
#define IMAP_AID_SHIFT 26
-#define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */
+#define IMAP_NID_SAFARI 0x03e00000UL /* Safari NodeID */
#define IMAP_NID_SHIFT 21
-#define IMAP_IGN 0x000007c0 /* IRQ Group Number */
-#define IMAP_INO 0x0000003f /* IRQ Number */
-#define IMAP_INR 0x000007ff /* Full interrupt number*/
+#define IMAP_IGN 0x000007c0UL /* IRQ Group Number */
+#define IMAP_INO 0x0000003fUL /* IRQ Number */
+#define IMAP_INR 0x000007ffUL /* Full interrupt number*/
-#define ICLR_IDLE 0x00000000 /* Idle state */
-#define ICLR_TRANSMIT 0x00000001 /* Transmit state */
-#define ICLR_PENDING 0x00000003 /* Pending state */
+#define ICLR_IDLE 0x00000000UL /* Idle state */
+#define ICLR_TRANSMIT 0x00000001UL /* Transmit state */
+#define ICLR_PENDING 0x00000003UL /* Pending state */
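
The UL suffixes above (together with the earlier u32 -> unsigned long change for the value read back via upa_readq()) close a classic C promotion trap: the complement of a 32-bit constant is zero-extended to 64 bits, so masking a 64-bit register value with it silently clears the upper half. A standalone userspace sketch of the effect on an LP64 build; purely illustrative, not kernel code:

	#include <stdio.h>

	#define IMAP_VALID_32	0x80000000	/* unsigned int */
	#define IMAP_VALID_64	0x80000000UL	/* unsigned long */

	int main(void)
	{
		/* pretend this came back from upa_readq() */
		unsigned long reg = 0xdeadbeef80000001UL;

		/* ~0x80000000 is 32-bit 0x7fffffff; zero-extended, it wipes the high half */
		printf("32-bit mask: %016lx\n", reg & ~IMAP_VALID_32);
		/* ~0x80000000UL keeps the high 32 bits intact */
		printf("64-bit mask: %016lx\n", reg & ~IMAP_VALID_64);
		return 0;
	}
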
/* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
extern void sun4v_destroy_msi(unsigned int virt_irq);
extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
+extern void sparc64_set_msi(unsigned int virt_irq, u32 msi);
+extern u32 sparc64_get_msi(unsigned int virt_irq);
+
extern void fixup_irqs(void);
static __inline__ void set_softint(unsigned long bits)
. = ALIGN(4096);
.note : { *(.note.*) }
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
+ __ex_table : {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
BUG_TABLE
- __uml_setup_start = .;
- .uml.setup.init : { *(.uml.setup.init) }
- __uml_setup_end = .;
+ .uml.setup.init : {
+ __uml_setup_start = .;
+ *(.uml.setup.init)
+ __uml_setup_end = .;
+ }
- __uml_help_start = .;
- .uml.help.init : { *(.uml.help.init) }
- __uml_help_end = .;
+ .uml.help.init : {
+ __uml_help_start = .;
+ *(.uml.help.init)
+ __uml_help_end = .;
+ }
- __uml_postsetup_start = .;
- .uml.postsetup.init : { *(.uml.postsetup.init) }
- __uml_postsetup_end = .;
+ .uml.postsetup.init : {
+ __uml_postsetup_start = .;
+ *(.uml.postsetup.init)
+ __uml_postsetup_end = .;
+ }
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
+ .init.setup : {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
. = ALIGN(32);
- __per_cpu_start = . ;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = . ;
+ .data.percpu : {
+ __per_cpu_start = . ;
+ *(.data.percpu)
+ __per_cpu_end = . ;
+ }
- __initcall_start = .;
.initcall.init : {
+ __initcall_start = .;
INITCALLS
+ __initcall_end = .;
}
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
- __uml_initcall_start = .;
- .uml.initcall.init : { *(.uml.initcall.init) }
- __uml_initcall_end = .;
+ .uml.initcall.init : {
+ __uml_initcall_start = .;
+ *(.uml.initcall.init)
+ __uml_initcall_end = .;
+ }
__init_end = .;
SECURITY_INIT
- __exitcall_begin = .;
- .exitcall : { *(.exitcall.exit) }
- __exitcall_end = .;
+ .exitcall : {
+ __exitcall_begin = .;
+ *(.exitcall.exit)
+ __exitcall_end = .;
+ }
- __uml_exitcall_begin = .;
- .uml.exitcall : { *(.uml.exitcall.exit) }
- __uml_exitcall_end = .;
+ .uml.exitcall : {
+ __uml_exitcall_begin = .;
+ *(.uml.exitcall.exit)
+ __uml_exitcall_end = .;
+ }
. = ALIGN(4);
- __alt_instructions = .;
- .altinstructions : { *(.altinstructions) }
- __alt_instructions_end = .;
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { *(.exit.text) }
.exit.data : { *(.exit.data) }
- __preinit_array_start = .;
- .preinit_array : { *(.preinit_array) }
- __preinit_array_end = .;
- __init_array_start = .;
- .init_array : { *(.init_array) }
- __init_array_end = .;
- __fini_array_start = .;
- .fini_array : { *(.fini_array) }
- __fini_array_end = .;
+ .preinit_array : {
+ __preinit_array_start = .;
+ *(.preinit_array)
+ __preinit_array_end = .;
+ }
+ .init_array : {
+ __init_array_start = .;
+ *(.init_array)
+ __init_array_end = .;
+ }
+ .fini_array : {
+ __fini_array_start = .;
+ *(.fini_array)
+ __fini_array_end = .;
+ }
. = ALIGN(4096);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
+ .init.ramfs : {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
/* Sections to be discarded */
/DISCARD/ : {
#ifndef __UM_ELF_X86_64_H
#define __UM_ELF_X86_64_H
+#include <linux/sched.h>
#include <asm/user.h>
+#include "skas.h"
/* x86-64 relocation types, taken from asm-x86_64/elf.h */
#define R_X86_64_NONE 0 /* No reloc */
PT_REGS_R15(regs) = 0; \
} while (0)
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+ (pr_reg)[0] = (regs)->regs.gp[0]; \
+ (pr_reg)[1] = (regs)->regs.gp[1]; \
+ (pr_reg)[2] = (regs)->regs.gp[2]; \
+ (pr_reg)[3] = (regs)->regs.gp[3]; \
+ (pr_reg)[4] = (regs)->regs.gp[4]; \
+ (pr_reg)[5] = (regs)->regs.gp[5]; \
+ (pr_reg)[6] = (regs)->regs.gp[6]; \
+ (pr_reg)[7] = (regs)->regs.gp[7]; \
+ (pr_reg)[8] = (regs)->regs.gp[8]; \
+ (pr_reg)[9] = (regs)->regs.gp[9]; \
+ (pr_reg)[10] = (regs)->regs.gp[10]; \
+ (pr_reg)[11] = (regs)->regs.gp[11]; \
+ (pr_reg)[12] = (regs)->regs.gp[12]; \
+ (pr_reg)[13] = (regs)->regs.gp[13]; \
+ (pr_reg)[14] = (regs)->regs.gp[14]; \
+ (pr_reg)[15] = (regs)->regs.gp[15]; \
+ (pr_reg)[16] = (regs)->regs.gp[16]; \
+ (pr_reg)[17] = (regs)->regs.gp[17]; \
+ (pr_reg)[18] = (regs)->regs.gp[18]; \
+ (pr_reg)[19] = (regs)->regs.gp[19]; \
+ (pr_reg)[20] = (regs)->regs.gp[20]; \
+ (pr_reg)[21] = current->thread.arch.fs; \
+ (pr_reg)[22] = 0; \
+ (pr_reg)[23] = 0; \
+ (pr_reg)[24] = 0; \
+ (pr_reg)[25] = 0; \
+ (pr_reg)[26] = 0;
+
+static inline int elf_core_copy_fpregs(struct task_struct *t,
+ elf_fpregset_t *fpu)
+{
+ int cpu = current_thread->cpu;
+ return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+}
+
+#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
+
#ifdef TIF_IA32 /* XXX */
#error XXX, indeed
clear_thread_flag(TIF_IA32);
ATA_PIO5 = ATA_PIO4 | (1 << 5),
ATA_PIO6 = ATA_PIO5 | (1 << 6),
+ ATA_SWDMA0 = (1 << 0),
+ ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1),
+ ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2),
+
+ ATA_SWDMA2_ONLY = (1 << 2),
+
+ ATA_MWDMA0 = (1 << 0),
+ ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1),
+ ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2),
+
+ ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2),
+ ATA_MWDMA2_ONLY = (1 << 2),
+
ATA_UDMA0 = (1 << 0),
ATA_UDMA1 = ATA_UDMA0 | (1 << 1),
ATA_UDMA2 = ATA_UDMA1 | (1 << 2),
static inline int cpu_is_offline(int cpu) { return 0; }
#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
extern int suspend_cpu_hotplug;
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
-#else
+#else /* !CONFIG_PM_SLEEP_SMP */
#define suspend_cpu_hotplug 0
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
-#endif
+#endif /* !CONFIG_PM_SLEEP_SMP */
#endif /* _LINUX_CPU_H_ */
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
- pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
- if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
- return -EINVAL;
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return 0;
}
#else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
- pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif
#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
#define follow_huge_pmd(mm, addr, pmd, write) NULL
-#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
+#define prepare_hugepage_range(addr,len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
+ ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
};
enum hsm_task_states {
#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379
#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a
#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380
-#define PCI_DEVICE_ID_ATI_IXP600_SMBUS 0x4385
+#define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385
#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
-#define PCI_DEVICE_ID_ATI_IXP700_SMBUS 0x4395
#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
#define PCI_VENDOR_ID_VLSI 0x1004
#define RTAX_INITCWND RTAX_INITCWND
RTAX_FEATURES,
#define RTAX_FEATURES RTAX_FEATURES
+ RTAX_RTO_MIN,
+#define RTAX_RTO_MIN RTAX_RTO_MIN
__RTAX_MAX
};
u64 exec_start;
u64 sum_exec_runtime;
+ u64 prev_sum_exec_runtime;
u64 wait_start_fair;
u64 sleep_start_fair;
* Sorry that the following has to be that ugly, but some versions of GCC
* have trouble with constant propagation and loops.
*/
-static inline int kmalloc_index(size_t size)
+static __always_inline int kmalloc_index(size_t size)
{
if (!size)
return 0;
* This ought to end up with a global pointer to the right cache
* in kmalloc_caches.
*/
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
int index = kmalloc_index(size);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
const struct sctp_chunk *);
struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
const struct sctp_chunk *,
const size_t hint);
struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
struct sock *);
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
#endif /* __sctp_ulpqueue_h__ */
return err;
}
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;
int disable_nonboot_cpus(void)
out:
mutex_unlock(&cpu_add_remove_lock);
}
-#endif
+#endif /* CONFIG_PM_SLEEP_SMP */
if (unlikely(tsk->audit_context))
audit_free(tsk);
+ tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
exit_mm(tsk);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
- tsk->exit_code = code;
proc_exit_connector(tsk);
exit_task_namespaces(tsk);
exit_notify(tsk);
* We do this before actually registering it, to make sure that
* a 'real' IRQ doesn't run in parallel with our fake
*/
- if (irqflags & IRQF_DISABLED) {
- unsigned long flags;
+ unsigned long flags;
- local_irq_save(flags);
- handler(irq, dev_id);
- local_irq_restore(flags);
- } else
- handler(irq, dev_id);
+ local_irq_save(flags);
+ handler(irq, dev_id);
+ local_irq_restore(flags);
}
#endif
CAUTION: this option will cause your machine's real-time clock to be
set to an invalid time after a resume.
-config SUSPEND_SMP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) || (PPC64 && (PPC_PSERIES || PPC_PMAC))
- depends on SMP
- default y
-
-config SUSPEND_SMP
+config PM_SLEEP_SMP
bool
- depends on SUSPEND_SMP_POSSIBLE && PM_SLEEP
+ depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+ depends on PM_SLEEP
select HOTPLUG_CPU
default y
depends on SUSPEND || HIBERNATION
default y
+config SUSPEND_UP_POSSIBLE
+ bool
+ depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
+ || SUPERH || FRV
+ depends on !SMP
+ default y
+
+config SUSPEND_SMP_POSSIBLE
+ bool
+ depends on (X86 && !X86_VOYAGER) \
+ || (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
+ depends on SMP
+ default y
+
config SUSPEND
bool "Suspend to RAM and standby"
depends on PM
- depends on !SMP || SUSPEND_SMP_POSSIBLE
+ depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
default y
---help---
Allow the system to enter sleep states in which main memory is
powered and thus its contents are preserved, such as the
suspend-to-RAM state (i.e. the ACPI S3 state).
+config HIBERNATION_UP_POSSIBLE
+ bool
+ depends on X86 || PPC64_SWSUSP || FRV || PPC32
+ depends on !SMP
+ default y
+
+config HIBERNATION_SMP_POSSIBLE
+ bool
+ depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
+ depends on SMP
+ default y
+
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on PM && SWAP
- depends on ((X86 || PPC64_SWSUSP || FRV || PPC32) && !SMP) || SUSPEND_SMP_POSSIBLE
+ depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
p->se.wait_start_fair = 0;
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
+ p->se.prev_sum_exec_runtime = 0;
p->se.delta_exec = 0;
p->se.delta_fair_run = 0;
p->se.delta_fair_sleep = 0;
delta_fair = calc_delta_fair(delta_exec, lw);
delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
- if (cfs_rq->sleeper_bonus > sysctl_sched_latency) {
+ if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
delta = min(delta, (unsigned long)(
(long)sysctl_sched_runtime_limit - curr->wait_runtime));
{
unsigned long delta_fair;
+ if (unlikely(!se->wait_start_fair))
+ return;
+
delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
(u64)(cfs_rq->fair_clock - se->wait_start_fair));
/*
* Preempt the current task with a newly woken task if needed:
*/
-static void
+static int
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
struct sched_entity *curr, unsigned long granularity)
{
* preempt the current task unless the best task has
* a larger than sched_granularity fairness advantage:
*/
- if (__delta > niced_granularity(curr, granularity))
+ if (__delta > niced_granularity(curr, granularity)) {
resched_task(rq_of(cfs_rq)->curr);
+ return 1;
+ }
+ return 0;
}
static inline void
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
+ unsigned long gran, ideal_runtime, delta_exec;
struct sched_entity *next;
/*
if (next == curr)
return;
- __check_preempt_curr_fair(cfs_rq, next, curr,
- sched_granularity(cfs_rq));
+ gran = sched_granularity(cfs_rq);
+ ideal_runtime = niced_granularity(curr,
+ max(sysctl_sched_latency / cfs_rq->nr_running,
+ (unsigned long)sysctl_sched_min_granularity));
+ /*
+ * If we executed more than what the latency constraint suggests,
+ * reduce the rescheduling granularity. This way the total latency
+ * (how long a task can go without being scheduled) converges to
+ * sysctl_sched_latency:
+ */
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime)
+ gran = 0;
+
+ if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
+ curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
}
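
To put rough numbers on the ideal_runtime logic above, here is a standalone sketch of the same decision, ignoring the nice-level weighting that niced_granularity() applies; the values are made up for illustration and are not the kernel defaults:

	#include <stdio.h>

	int main(void)
	{
		/* illustrative values, in nanoseconds */
		unsigned long sysctl_sched_latency = 20000000UL;	/* 20 ms */
		unsigned long sysctl_sched_min_granularity = 2000000UL;	/* 2 ms */
		unsigned long nr_running = 4;
		unsigned long gran = 4000000UL;				/* 4 ms */

		/* the latency target split across runnable tasks, clamped below */
		unsigned long slice = sysctl_sched_latency / nr_running;
		unsigned long ideal_runtime =
			slice > sysctl_sched_min_granularity ?
				slice : sysctl_sched_min_granularity;

		/* the task has run 6 ms since it was last rescheduled */
		unsigned long delta_exec = 6000000UL;

		if (delta_exec > ideal_runtime)
			gran = 0;	/* overran its slice: preempt at once */

		printf("ideal_runtime=%lu ns, gran=%lu ns\n", ideal_runtime, gran);
		return 0;
	}
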
/**************************************************
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
- struct sched_entity *se = &p->se;
+ struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
sched_info_queued(p);
+ update_curr(cfs_rq);
update_stats_enqueue(cfs_rq, se);
/*
* Child runs first: we let it run before the parent
* until it reschedules once. We set up the key so that
* it will preempt the parent:
*/
- p->se.fair_key = current->se.fair_key -
- niced_granularity(&rq->curr->se, sched_granularity(cfs_rq)) - 1;
+ se->fair_key = curr->fair_key -
+ niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
/*
* The first wait is dominated by the child-runs-first logic,
* so do not credit it with that waiting time yet:
*/
if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
- p->se.wait_start_fair = 0;
+ se->wait_start_fair = 0;
/*
* The statistical average of wait_runtime is about
* -granularity/2, so initialize the task with that:
*/
- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
- p->se.wait_runtime = -(sched_granularity(cfs_rq) / 2);
+ if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+ se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
+ schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+ }
__enqueue_entity(cfs_rq, se);
}
void sigqueue_free(struct sigqueue *q)
{
unsigned long flags;
+	spinlock_t *lock = &current->sighand->siglock;
+
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
/*
* If the signal is still pending remove it from the
- * pending queue.
+ * pending queue. We must hold ->siglock while testing
+ * q->list to serialize with collect_signal().
*/
- if (unlikely(!list_empty(&q->list))) {
-		spinlock_t *lock = &current->sighand->siglock;
- read_lock(&tasklist_lock);
- spin_lock_irqsave(lock, flags);
- if (!list_empty(&q->list))
- list_del_init(&q->list);
- spin_unlock_irqrestore(lock, flags);
- read_unlock(&tasklist_lock);
- }
+ spin_lock_irqsave(lock, flags);
+ if (!list_empty(&q->list))
+ list_del_init(&q->list);
+ spin_unlock_irqrestore(lock, flags);
+
q->flags &= ~SIGQUEUE_PREALLOC;
__sigqueue_free(q);
}
* Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
* LBT 04.03.94
*/
-
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
struct task_struct *p;
if (!thread_group_leader(p))
goto out;
- if (p->real_parent == group_leader) {
+ if (p->real_parent->tgid == group_leader->tgid) {
err = -EPERM;
if (task_session(p) != task_session(group_leader))
goto out;
struct user_namespace *ns;
ns = container_of(kref, struct user_namespace, kref);
+ free_uid(ns->root_user);
kfree(ns);
}
goto out;
}
+ if (!nodes_subset(new, node_online_map)) {
+ err = -EINVAL;
+ goto out;
+ }
+
err = security_task_movememory(task);
if (err)
goto out;
int rc = 0;
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
+ int rcu_locked = 0;
if (!newpage)
return -ENOMEM;
* we cannot notice that anon_vma is freed while we migrate a page.
* This rcu_read_lock() delays freeing anon_vma pointer until the end
* of migration. File cache pages are no problem because of page_lock()
+ * File cache pages may use write_page() or lock_page() in migration,
+ * so we only need to take care of anon pages here.
*/
- rcu_read_lock();
+ if (PageAnon(page)) {
+ rcu_read_lock();
+ rcu_locked = 1;
+ }
/*
* This is a corner case handling.
* When a new swap-cache is read into, it is linked to LRU
if (rc)
remove_migration_ptes(page, page);
rcu_unlock:
- rcu_read_unlock();
+ if (rcu_locked)
+ rcu_read_unlock();
unlock:
return 0;
bad:
for_each_zone(dzone) {
+ if (!populated_zone(dzone))
+ continue;
if (dzone == zone)
break;
kfree(zone_pcp(dzone, cpu));
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
- BUG_ON(err);
+ if (err)
+ printk(KERN_ERR "SLUB: Unable to add boot slab %s"
+ " to sysfs\n", s->name);
}
while (alias_list) {
alias_list = alias_list->next;
err = sysfs_slab_alias(al->s, al->name);
- BUG_ON(err);
+ if (err)
+ printk(KERN_ERR "SLUB: Unable to add boot slab alias"
+			" %s to sysfs\n", al->name);
kfree(al);
}
if (hold_time(br) == 0)
return;
+ /* ignore packets unless we are using this port */
+ if (!(source->state == BR_STATE_LEARNING ||
+ source->state == BR_STATE_FORWARDING))
+ return;
+
fdb = fdb_find(head, addr);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
*/
static int port_cost(struct net_device *dev)
{
- if (dev->ethtool_ops->get_settings) {
- struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- int err = dev->ethtool_ops->get_settings(dev, &ecmd);
- if (!err) {
+ if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+ if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
switch(ecmd.speed) {
- case SPEED_100:
- return 19;
- case SPEED_1000:
- return 4;
case SPEED_10000:
return 2;
+ case SPEED_1000:
+ return 4;
+ case SPEED_100:
+ return 19;
case SPEED_10:
return 100;
}
{
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
- if (p && p->state != BR_STATE_DISABLED)
+ if (p)
br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
return 0; /* process further */
}
/* Field for thread to receive "posted" events terminate, stop ifs etc. */
u32 control;
- int pid;
int cpu;
wait_queue_head_t queue;
}
if ((netif_queue_stopped(odev) ||
- netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
- need_resched()) {
+ (pkt_dev->skb &&
+ netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+ need_resched()) {
idle_start = getCurUs();
if (!netif_running(odev)) {
init_waitqueue_head(&t->queue);
- t->pid = current->pid;
-
pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid);
max_before_softirq = t->max_before_softirq;
tcp_grow_window(sk, skb);
}
+static u32 tcp_rto_min(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ u32 rto_min = TCP_RTO_MIN;
+
+ if (dst_metric_locked(dst, RTAX_RTO_MIN))
+ rto_min = dst->metrics[RTAX_RTO_MIN-1];
+ return rto_min;
+}
+
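
For reference, the new RTAX_RTO_MIN metric read above is the kernel side of a per-route minimum RTO; assuming an iproute2 recent enough to know about the metric, it surfaces as the rto_min route attribute, e.g.:

	ip route change 10.0.0.0/24 dev eth0 rto_min 200ms
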
/* Called to compute a smoothed rtt estimate. The data fed to this
* routine either comes from timestamps, or from segments that were
* known _not_ to have been retransmitted [see Karn/Partridge
if (tp->mdev_max < tp->rttvar)
tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
tp->rtt_seq = tp->snd_nxt;
- tp->mdev_max = TCP_RTO_MIN;
+ tp->mdev_max = tcp_rto_min(sk);
}
} else {
/* no previous measure. */
tp->srtt = m<<3; /* take the measured time to be rtt */
tp->mdev = m<<1; /* make sure rto = 3*rtt */
- tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
}
void *matchinfo,
unsigned int hook_mask)
{
- const struct xt_tcp *udpinfo = matchinfo;
+ const struct xt_udp *udpinfo = matchinfo;
/* Must specify no unknown invflags */
return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
if (TC_H_MAJ(skb->priority) != sch->handle) {
err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
- switch (tc_classify(skb, q->filter_list, &res)) {
+ switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
*qerr = NET_XMIT_SUCCESS;
break;
case SCTP_TRANSPORT_DOWN:
- transport->state = SCTP_INACTIVE;
+		/* if the transport was never confirmed, do not transition it
+ * to inactive state.
+ */
+ if (transport->state != SCTP_UNCONFIRMED)
+ transport->state = SCTP_INACTIVE;
+
spc_state = SCTP_ADDR_UNREACHABLE;
break;
*/
if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
(!fast_retransmit && !chunk->tsn_gap_acked)) {
+			/* If this chunk was sent less than 1 rto ago, do not
+			 * retransmit this chunk, but give the peer time
+			 * to acknowledge it.
+ */
+ if ((jiffies - chunk->sent_at) < transport->rto)
+ continue;
+
/* RFC 2960 6.2.1 Processing a Received SACK
*
* C) Any time a DATA chunk is marked for
* abort chunk.
*/
void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
- const void *payload, size_t paylen)
+ size_t paylen)
{
sctp_errhdr_t err;
__u16 len;
len = sizeof(sctp_errhdr_t) + paylen;
err.length = htons(len);
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
- sctp_addto_chunk(chunk, paylen, payload);
}
/* 3.3.2 Initiation (INIT) (1)
/* Put the tsn back into network byte order. */
payload = htonl(tsn);
- sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
- sizeof(payload));
+ sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+ sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
goto err_copy;
}
- sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+ sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
if (paylen)
kfree(payload);
struct sctp_paramhdr phdr;
retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
- + sizeof(sctp_chunkhdr_t));
+ + sizeof(sctp_paramhdr_t));
if (!retval)
goto end;
- sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+ sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+ + sizeof(sctp_paramhdr_t));
phdr.type = htons(chunk->chunk_hdr->type);
phdr.length = chunk->chunk_hdr->length;
- sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+ sctp_addto_chunk(retval, paylen, payload);
+ sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
end:
return retval;
if (!retval)
goto nodata;
- sctp_init_cause(retval, cause_code, payload, paylen);
+ sctp_init_cause(retval, cause_code, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
nodata:
return retval;
void *target;
void *padding;
int chunklen = ntohs(chunk->chunk_hdr->length);
- int padlen = chunklen % 4;
+ int padlen = WORD_ROUND(chunklen) - chunklen;
padding = skb_put(chunk->skb, padlen);
target = skb_put(chunk->skb, len);
return target;
}
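
The padlen change above is easiest to see with concrete lengths: chunklen % 4 only equals the number of bytes needed to reach the next 4-byte boundary when the remainder happens to be 0 or 2. A quick standalone check, with WORD_ROUND written to match its definition in the SCTP headers (round up to a multiple of 4):

	#include <stdio.h>

	#define WORD_ROUND(s) (((s) + 3) & ~3)

	int main(void)
	{
		for (int chunklen = 4; chunklen <= 8; chunklen++) {
			int old_pad = chunklen % 4;			/* buggy */
			int new_pad = WORD_ROUND(chunklen) - chunklen;	/* fixed */
			printf("chunklen=%d old=%d new=%d -> padded to %d\n",
			       chunklen, old_pad, new_pad, chunklen + new_pad);
		}
		return 0;
	}
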
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+ void *target;
+ int chunklen = ntohs(chunk->chunk_hdr->length);
+
+ target = skb_put(chunk->skb, len);
+
+ memcpy(target, data, len);
+
+ /* Adjust the chunk length field. */
+ chunk->chunk_hdr->length = htons(chunklen + len);
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+ return target;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
*/
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
+ struct sctp_datamsg *msg;
+ struct sctp_chunk *lchunk;
+ struct sctp_stream *stream;
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
- /* This is the last possible instant to assign a SSN. */
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
- ssn = 0;
- } else {
- sid = ntohs(chunk->subh.data_hdr->stream);
- if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
- ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
- else
- ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
- }
+ /* All fragments will be on the same stream */
+ sid = ntohs(chunk->subh.data_hdr->stream);
+ stream = &chunk->asoc->ssnmap->out;
- chunk->subh.data_hdr->ssn = htons(ssn);
- chunk->has_ssn = 1;
+ /* Now assign the sequence number to the entire message.
+ * All fragments must have the same stream sequence number.
+ */
+ msg = chunk->msg;
+ list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ ssn = 0;
+ } else {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+ ssn = sctp_ssn_next(stream, sid);
+ else
+ ssn = sctp_ssn_peek(stream, sid);
+ }
+
+ lchunk->subh.data_hdr->ssn = htons(ssn);
+ lchunk->has_ssn = 1;
+ }
}
/* Helper function to assign a TSN if needed. This assumes that both
__be32 n = htonl(usecs);
sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
- &n, sizeof(n));
+ sizeof(n));
+ sctp_addto_chunk(*errp, sizeof(n), &n);
*error = -SCTP_IERROR_STALE_COOKIE;
} else
*error = -SCTP_IERROR_NOMEM;
report.num_missing = htonl(1);
report.type = paramtype;
sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
- &report, sizeof(report));
+ sizeof(report));
+ sctp_addto_chunk(*errp, sizeof(report), &report);
}
/* Stop processing this chunk. */
*errp = sctp_make_op_error_space(asoc, chunk, 0);
if (*errp)
- sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+ sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
/* Stop processing this chunk. */
return 0;
*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
- sizeof(error));
- sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+ sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+ sizeof(error) + sizeof(sctp_paramhdr_t));
+ sctp_addto_chunk(*errp, sizeof(error), error);
+ sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
}
return 0;
if (!*errp)
*errp = sctp_make_op_error_space(asoc, chunk, len);
- if (*errp)
- sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
- param.v, len);
+ if (*errp) {
+ sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+ sctp_addto_chunk(*errp, len, param.v);
+ }
/* Stop processing this chunk. */
return 0;
*errp = sctp_make_op_error_space(asoc, chunk,
ntohs(chunk->chunk_hdr->length));
- if (*errp)
+ if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- param.v,
WORD_ROUND(ntohs(param.p->length)));
+ sctp_addto_chunk(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
+ }
break;
case SCTP_PARAM_ACTION_SKIP:
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- param.v,
WORD_ROUND(ntohs(param.p->length)));
+ sctp_addto_chunk(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
} else {
/* If there is no memory for generating the ERROR
* report as specified, an ABORT will be triggered
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
- if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+ if (param.v != (void*)chunk->chunk_end) {
sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
return 0;
}
break;
case SCTP_DISPOSITION_VIOLATION:
- printk(KERN_ERR "sctp protocol violation state %d "
- "chunkid %d\n", state, subtype.chunk);
+ if (net_ratelimit())
+ printk(KERN_ERR "sctp protocol violation state %d "
+ "chunkid %d\n", state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+ /* purge the fragmentation queue */
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
/* Abort any in progress partial delivery. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
break;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
- struct sock *sk;
int len;
/* 6.10 Bundling
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
- sk = ep->base.sk;
- /* If the endpoint is not listening or if the number of associations
- * on the TCP-style socket exceed the max backlog, respond with an
- * ABORT.
- */
- if (!sctp_sstate(sk, LISTENING) ||
- (sctp_style(sk, TCP) &&
- sk_acceptq_is_full(sk)))
- return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
+ struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceeds the max backlog, respond with an
+ * ABORT.
+ */
+ sk = ep->base.sk;
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
/* This should never happen, but lets log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
- printk(KERN_WARNING
- "%s association %p could not find address "
- NIP6_FMT "\n",
- __FUNCTION__,
- asoc,
- NIP6(from_addr.v6.sin6_addr));
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "%s association %p could not find address "
+ NIP6_FMT "\n",
+ __FUNCTION__,
+ asoc,
+ NIP6(from_addr.v6.sin6_addr));
} else {
- printk(KERN_WARNING
- "%s association %p could not find address "
- NIPQUAD_FMT "\n",
- __FUNCTION__,
- asoc,
- NIPQUAD(from_addr.v4.sin_addr.s_addr));
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "%s association %p could not find address "
+ NIPQUAD_FMT "\n",
+ __FUNCTION__,
+ asoc,
+ NIPQUAD(from_addr.v4.sin_addr.s_addr));
}
return SCTP_DISPOSITION_DISCARD;
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
- sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+ sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
- sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
* The function sctp_get_port_local() does duplicate address
* detection.
*/
+ addr->v4.sin_port = htons(snum);
if ((ret = sctp_get_port_local(sk, addr))) {
if (ret == (long) sk) {
/* This endpoint has a conflicting address. */
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
+ return 0;
}
/* Return if we are already listening. */
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
+ return 0;
}
if (sctp_sstate(sk, LISTENING))
return retval;
}
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *event;
+ __u32 tsn;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ event = sctp_skb2event(pos);
+ tsn = event->tsn;
+
+ /* Since the entire message must be abandoned by the
+ * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less than
+		 * or equal to ctsn_point.
+ */
+ if (TSN_lte(tsn, fwd_tsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ } else
+ break;
+ }
+}
+
/* Helper function to gather skbs that have possibly become
* ordered by an incoming chunk.
*/
/* Helper function to gather skbs that have possibly become
* ordered by forward tsn skipping their dependencies.
*/
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
csid = cevent->stream;
cssn = cevent->ssn;
- if (cssn != sctp_ssn_peek(in, csid))
+ /* Have we gone too far? */
+ if (csid > sid)
break;
- /* Found it, so mark in the ssnmap. */
- sctp_ssn_next(in, csid);
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ /* see if this ssn has been marked by skipping */
+ if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ break;
__skb_unlink(pos, &ulpq->lobby);
- if (!event) {
+ if (!event)
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- } else {
- /* Attach all gathered skbs to the event. */
- __skb_queue_tail(&temp, pos);
- }
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(&temp, pos);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* very first SKB on the 'temp' list.
*/
- if (event)
+ if (event) {
+		/* see if we have more ordered data that we can deliver */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
+ }
}
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data.
+ */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
struct sctp_stream *in;
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
*/
- sctp_ulpq_reap_ordered(ulpq);
+ sctp_ulpq_reap_ordered(ulpq, sid);
return;
}