/**
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
+ * @fn: workqueue function to be scheduled
+ * @data: data value to pass to workqueue function
+ * @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
* port_task. There is one port_task per port and it's the
return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
+static void ata_dev_config_ncq(struct ata_device *dev,
+ char *desc, size_t desc_sz)
+{
+ struct ata_port *ap = dev->ap;
+ int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+
+ if (!ata_id_has_ncq(dev->id)) {
+ desc[0] = '\0';
+ return;
+ }
+
+ if (ap->flags & ATA_FLAG_NCQ) {
+ hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
+ dev->flags |= ATA_DFLAG_NCQ;
+ }
+
+ if (hdepth >= ddepth)
+ snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+ else
+ snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
+}
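+
+ /* A worked example of the depth computation above (values illustrative):
+ * ata_id_queue_depth() reads bits 4:0 of IDENTIFY word 75, so a drive
+ * reporting a queue depth of 32 on a host with can_queue >= 31 yields
+ * hdepth = 31 (ATA_MAX_QUEUE - 1) and ddepth = 32, giving the
+ * description "NCQ (depth 31/32)".
+ */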
+
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
if (ata_id_has_lba(id)) {
const char *lba_desc;
+ char ncq_desc[20];
lba_desc = "LBA";
dev->flags |= ATA_DFLAG_LBA;
lba_desc = "LBA48";
}
+ /* config NCQ */
+ ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+
/* print device info to dmesg */
if (print_info)
ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
- "max %s, %Lu sectors: %s\n",
+ "max %s, %Lu sectors: %s %s\n",
ata_id_major_version(id),
ata_mode_string(xfer_mask),
(unsigned long long)dev->n_sectors,
- lba_desc);
+ lba_desc, ncq_desc);
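/* With ncq_desc appended, a typical boot-time line now reads
 * (values illustrative):
 *
 *	ata1.00: ATA-7, max UDMA/133, 156301488 sectors: LBA48 NCQ (depth 31/32)
 */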
} else {
/* CHS */
if (classes[i] == ATA_DEV_UNKNOWN)
classes[i] = ATA_DEV_NONE;
+ /* After the reset, the device state is PIO 0 and the controller
+ * state is undefined. Record the mode. */
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ap->device[i].pio_mode = XFER_PIO_0;
+
/* read IDENTIFY page and configure devices */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
dev = &ap->device[i];
/**
* ata_dev_init_params - Issue INIT DEV PARAMS command
* @dev: Device to which command will be sent
- * @heads: Number of heads
- * @sectors: Number of sectors
+ * @heads: Number of heads (taskfile parameter)
+ * @sectors: Number of sectors (taskfile parameter)
*
* LOCKING:
* Kernel thread context (may sleep)
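/* Its caller in this file, ata_dev_read_id(), feeds the command the
 * default CHS geometry straight from the IDENTIFY data when dealing
 * with old pre-LBA drives:
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 *
 * IDENTIFY word 3 holds the number of heads and word 6 the sectors
 * per track.
 */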
/**
* ata_mmio_data_xfer - Transfer data by MMIO
- * @ap: port to read/write
+ * @adev: device for this I/O
* @buf: data buffer
* @buflen: buffer length
* @write_data: 1 to write to the device, 0 to read from it
* Inherited from caller.
*/
-static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
- unsigned int buflen, int write_data)
+void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+ unsigned int buflen, int write_data)
{
+ struct ata_port *ap = adev->ap;
unsigned int i;
unsigned int words = buflen >> 1;
u16 *buf16 = (u16 *) buf;
/**
* ata_pio_data_xfer - Transfer data by PIO
- * @ap: port to read/write
+ * @adev: device to target
* @buf: data buffer
* @buflen: buffer length
* @write_data: 1 to write to the device, 0 to read from it
* Inherited from caller.
*/
-static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
- unsigned int buflen, int write_data)
+void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
+ unsigned int buflen, int write_data)
{
+ struct ata_port *ap = adev->ap;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
}
/**
- * ata_data_xfer - Transfer data from/to the data register.
- * @ap: port to read/write
+ * ata_pio_data_xfer_noirq - Transfer data by PIO with interrupts disabled
+ * @adev: device to target
* @buf: data buffer
* @buflen: buffer length
- * @do_write: read/write
+ * @write_data: 1 to write to the device, 0 to read from it
*
- * Transfer data from/to the device data register.
+ * Transfer data from/to the device data register by PIO. Do the
+ * transfer with interrupts disabled.
*
* LOCKING:
* Inherited from caller.
*/
-static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
- unsigned int buflen, int do_write)
+void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
+ unsigned int buflen, int write_data)
{
- /* Make the crap hardware pay the costs not the good stuff */
- if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
- unsigned long flags;
- local_irq_save(flags);
- if (ap->flags & ATA_FLAG_MMIO)
- ata_mmio_data_xfer(ap, buf, buflen, do_write);
- else
- ata_pio_data_xfer(ap, buf, buflen, do_write);
- local_irq_restore(flags);
- } else {
- if (ap->flags & ATA_FLAG_MMIO)
- ata_mmio_data_xfer(ap, buf, buflen, do_write);
- else
- ata_pio_data_xfer(ap, buf, buflen, do_write);
- }
+ unsigned long flags;
+ local_irq_save(flags);
+ ata_pio_data_xfer(adev, buf, buflen, write_data);
+ local_irq_restore(flags);
}
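
/* The three helpers above back the new per-port ->data_xfer operation
 * (see the ap->ops->data_xfer call sites below). A low-level driver
 * selects whichever variant matches its hardware; the ops name here
 * is hypothetical:
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		...
 *		.data_xfer	= ata_pio_data_xfer_noirq,
 *		...
 *	};
 */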
+
/**
* ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
* @qc: Command on going
if (PageHighMem(page)) {
unsigned long flags;
+ /* FIXME: use a bounce buffer */
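+ /* KM_IRQ0 is also used from interrupt context, so the mapping
+ * must be set up with local interrupts off */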
local_irq_save(flags);
buf = kmap_atomic(page, KM_IRQ0);
/* do the actual data transfer */
- ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+ ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
kunmap_atomic(buf, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = page_address(page);
- ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+ ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
}
qc->cursect++;
* ata_pio_sectors - Transfer one or many 512-byte sectors.
* @qc: Command on going
*
* Transfer one or many ATA_SECT_SIZE of data from/to the
* ATA device for the DRQ request.
*
* LOCKING:
DPRINTK("send cdb\n");
WARN_ON(qc->dev->cdb_len < 12);
- ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+ ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
ata_altstatus(ap); /* flush */
switch (qc->tf.protocol) {
"%u bytes trailing data\n", bytes);
for (i = 0; i < words; i++)
- ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+ ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
ap->hsm_task_state = HSM_ST_LAST;
return;
if (PageHighMem(page)) {
unsigned long flags;
+ /* FIXME: use bounce buffer */
local_irq_save(flags);
buf = kmap_atomic(page, KM_IRQ0);
/* do the actual data transfer */
- ata_data_xfer(ap, buf + offset, count, do_write);
+ ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
kunmap_atomic(buf, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = page_address(page);
- ata_data_xfer(ap, buf + offset, count, do_write);
+ ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
}
bytes -= count;
unsigned int ireason, bc_lo, bc_hi, bytes;
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
- ap->ops->tf_read(ap, &qc->tf);
- ireason = qc->tf.nsect;
- bc_lo = qc->tf.lbam;
- bc_hi = qc->tf.lbah;
+ /* Abuse qc->result_tf for temp storage of intermediate TF
+ * here to save some kernel stack usage.
+ * For normal completion, qc->result_tf is not relevant. For
+ * error, qc->result_tf is later overwritten by ata_qc_complete().
+ * So, the correctness of qc->result_tf is not affected.
+ */
+ ap->ops->tf_read(ap, &qc->result_tf);
+ ireason = qc->result_tf.nsect;
+ bc_lo = qc->result_tf.lbam;
+ bc_hi = qc->result_tf.lbah;
bytes = (bc_hi << 8) | bc_lo;
/* shall be cleared to zero, indicating xfer of data */
} else
ata_qc_complete(qc);
}
+
+ ata_altstatus(ap); /* flush */
}
/**
poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
/* check device status */
- if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
- /* Wrong status. Let EH handle this */
- qc->err_mask |= AC_ERR_HSM;
+ if (unlikely((status & ATA_DRQ) == 0)) {
+ /* handle BSY=0, DRQ=0 as error */
+ if (likely(status & (ATA_ERR | ATA_DF)))
+ /* device stops HSM for abort/error */
+ qc->err_mask |= AC_ERR_DEV;
+ else
+ /* HSM violation. Let EH handle this */
+ qc->err_mask |= AC_ERR_HSM;
+
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
if (unlikely(status & (ATA_ERR | ATA_DF))) {
printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
ap->id, status);
- qc->err_mask |= AC_ERR_DEV;
+ qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
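
/* Summary of the DRQ/error classification above:
 *
 *	BSY=0 DRQ=0, ERR or DF set    -> AC_ERR_DEV (device aborted the command)
 *	BSY=0 DRQ=0, ERR and DF clear -> AC_ERR_HSM (protocol violation; let EH decide)
 *	DRQ=1 with ERR or DF set      -> AC_ERR_HSM (a device is not expected to
 *	                                 report an error while still requesting data)
 */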
if (qc->tf.protocol == ATA_PROT_ATAPI) {
/* ATAPI PIO protocol */
if ((status & ATA_DRQ) == 0) {
- /* no more data to transfer */
+ /* No more data to transfer or device error.
+ * Device error will be tagged in HSM_ST_LAST.
+ */
ap->hsm_task_state = HSM_ST_LAST;
goto fsm_start;
}
if (unlikely(status & (ATA_ERR | ATA_DF))) {
printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
ap->id, status);
- qc->err_mask |= AC_ERR_DEV;
+ qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
/* ATA PIO protocol */
if (unlikely((status & ATA_DRQ) == 0)) {
/* handle BSY=0, DRQ=0 as error */
- qc->err_mask |= AC_ERR_HSM;
+ if (likely(status & (ATA_ERR | ATA_DF)))
+ /* device stops HSM for abort/error */
+ qc->err_mask |= AC_ERR_DEV;
+ else
+ /* HSM violation. Let EH handle this */
+ qc->err_mask |= AC_ERR_HSM;
+
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
status = ata_wait_idle(ap);
}
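+ /* the device must be idle (BSY=0, DRQ=0) once the final sector
+ * has been transferred; anything else is an HSM violation */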
+ if (status & (ATA_BUSY | ATA_DRQ))
+ qc->err_mask |= AC_ERR_HSM;
+
/* ata_pio_sectors() might change the
 * state to HSM_ST_LAST, so the state
 * is changed after ata_pio_sectors().
struct ata_port *ap = qc->ap;
switch (qc->tf.protocol) {
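+ /* NCQ commands move data by DMA just like ATA_PROT_DMA, so they
+ * need an S/G mapping as well */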
+ case ATA_PROT_NCQ:
case ATA_PROT_DMA:
case ATA_PROT_ATAPI_DMA:
return 1;
if (ap->flags & ATA_FLAG_SUSPENDED) {
struct ata_device *failed_dev;
+
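+ /* give the just-woken device time to drop BSY/DRQ before
+ * transfer modes are reprogrammed */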
+ ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
+
ap->flags &= ~ATA_FLAG_SUSPENDED;
while (ata_set_mode(ap, &failed_dev))
ata_dev_disable(failed_dev);
/**
* ata_device_suspend - prepare a device for suspend
* @dev: the device to suspend
+ * @state: target power management state
*
* Flush the cache on the drive, if appropriate, then issue a
* standbynow command.
ap->sata_spd_limit = UINT_MAX;
ap->active_tag = ATA_TAG_POISON;
ap->last_ctl = 0xFF;
+ ap->msg_enable = ATA_MSG_DRV;
INIT_WORK(&ap->port_task, NULL, NULL);
INIT_LIST_HEAD(&ap->eh_done_q);
+ init_waitqueue_head(&ap->eh_wait_q);
/* set cable type */
ap->cbl = ATA_CBL_NONE;
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
+EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
+EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);