/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25, 500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
const struct ata_port_operations ata_base_port_ops = {
	.irq_clear		= ata_noop_irq_clear,
	.prereset		= ata_std_prereset,
	.hardreset		= sata_std_hardreset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.dev_select		= ata_noop_dev_select,
};
const struct ata_port_operations sata_pmp_port_ops = {
	.inherits		= &sata_port_ops,

	.pmp_prereset		= sata_pmp_std_prereset,
	.pmp_hardreset		= sata_pmp_std_hardreset,
	.pmp_postreset		= sata_pmp_std_postreset,
	.error_handler		= sata_pmp_error_handler,
};
const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.softreset		= ata_std_softreset,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.dev_select		= ata_std_dev_select,
	.check_status		= ata_check_status,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.data_xfer		= ata_data_xfer,
	.irq_on			= ata_irq_on,

	.port_start		= ata_sff_port_start,
};
const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.mode_filter		= ata_pci_default_filter,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.irq_clear		= ata_bmdma_irq_clear,
};
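/*
 * Illustrative sketch (not part of this file): a low-level driver
 * typically builds its own ata_port_operations on top of one of the
 * tables above via .inherits, overriding only the callbacks it needs.
 * The my_* names below are hypothetical.
 *
 *	static struct ata_port_operations my_pata_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= my_set_piomode,
 *	};
 */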
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;
struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
/**
 *	ata_force_spd_limit - force SATA spd limit according to libata.force
 *	@link: ATA link of interest
 *
 *	Force SATA spd limit according to libata.force and whine about
 *	it.  When only the port part is specified (e.g. 1:), the limit
 *	applies to all links connected to both the host link and all
 *	fan-out ports connected via PMP.  If the device part is
 *	specified as 0 (e.g. 1.00:), it specifies the first fan-out
 *	link not the host link.  Device number 15 always points to the
 *	host link whether PMP is attached or not.
 */
static void ata_force_spd_limit(struct ata_link *link)
{
	int linkno, i;

	if (ata_is_host_link(link))
		linkno = 15;
	else
		linkno = link->pmp;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		if (!fe->param.spd_limit)
			continue;

		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
		ata_link_printk(link, KERN_NOTICE,
			"FORCE: PHY spd limit set to %s\n", fe->param.name);
		return;
	}
}
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)
		alt_devno = 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)
		alt_devno = 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
}
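/*
 * Worked example (illustrative): a Command FIS for READ SECTOR(S)
 * (command 0x20) at LBA 0x1234 on PMP port 0 starts
 *
 *	fis[0] = 0x27	(Register - Host to Device)
 *	fis[1] = 0x80	(PMP 0, bit 7 set: Command FIS)
 *	fis[2] = 0x20	(tf->command)
 *	fis[4] = 0x34	(tf->lbal, low LBA byte)
 */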
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
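/*
 * Worked example (illustrative): an LBA48 FUA DMA write gives
 * index 16, fua 4, lba48 2, write 1, so ata_rw_cmds[23] ==
 * ATA_CMD_WRITE_FUA_EXT; the same flags on a multi-count PIO
 * device index into the "pio multi" rows at the top of the table.
 */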
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
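/*
 * Worked example (illustrative): with a 16-head, 63-sector geometry,
 * cyl 2, head 3, sect 4 decodes to (2 * 16 + 3) * 63 + 4 = 2209.
 */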
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
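/*
 * Worked example (illustrative): ata_pack_xfermask(0x1f, 0x07, 0x3f)
 * ORs PIO0-4, MWDMA0-2 and UDMA0-5 into one bitmap, each group
 * shifted into its own ATA_SHIFT_* field; ata_unpack_xfermask()
 * below reverses the operation.
 */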
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
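/*
 * Worked example (illustrative): a mask whose highest set bit is
 * ATA_SHIFT_UDMA + 5 maps to XFER_UDMA_0 + 5, i.e. XFER_UDMA_5;
 * ata_xfer_mode2mask(XFER_UDMA_5) below then recovers the mask of
 * UDMA5 and every slower UDMA mode.
 */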
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void)err_mask;

	return 0;
}
/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, and call driver specific callbacks for disabling Host
 *	Initiated Power management.
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}
#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
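/*
 * Usage sketch (illustrative): pulling the model string out of an
 * IDENTIFY page, trailing spaces trimmed and NUL-terminated:
 *
 *	char model[ATA_ID_PROD_LEN + 1];
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */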
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 *	ata_hpa_resize	-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88], id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it is the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
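/*
 * Worked example (illustrative): a drive with word 53 bit 1 set and
 * id[ATA_ID_PIO_MODES] = 0x0003 advertises PIO3 and PIO4, so pio_mask
 * becomes (0x03 << 3) | 0x7 = 0x1f (PIO0-4); id[ATA_ID_MWDMA_MODES] =
 * 0x0007 likewise yields MWDMA0-2.
 */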
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the workqueue function to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule the port_task function for execution after @delay
 *	jiffies.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
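/*
 * Usage sketch (illustrative): issuing a register-only command such
 * as FLUSH CACHE through this helper:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 */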
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				"both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
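/*
 * Example output (illustrative): a drive advertising queue depth 32
 * behind a host limited to 31 outstanding commands is reported as
 * "NCQ (depth 31/32)" in the info line printed by ata_dev_configure()
 * below.
 */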
2338 * ata_dev_configure - Configure the specified ATA/ATAPI device
2339 * @dev: Target device to configure
2341 * Configure @dev according to @dev->id. Generic and low-level
2342 * driver specific fixups are also applied.
2345 * Kernel thread context (may sleep)
2348 * 0 on success, -errno otherwise
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __func__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
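/* These helpers exist so a low-level driver can hardwire its cable type
 * into the port operations instead of probing.  A minimal sketch, with
 * foo_port_ops as a hypothetical driver's ops (the .inherits chain
 * follows the port_ops templates at the top of this file):
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */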
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
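/* Worked example: with sata_spd_limit == 0x3 (1.5 and 3.0 Gbps allowed)
 * and the link currently at 3.0 Gbps (spd == 2), fls(0x3) - 1 == 1 drops
 * the high bit leaving mask == 0x1, and (1 << (2 - 1)) - 1 == 0x1 keeps
 * it, so the new limit is 1.5 Gbps only.
 */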
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	{ 0xFF }
};
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
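/* ENOUGH() is round-up division: as used by ata_timing_quantize() below,
 * nanosecond timings are scaled by 1000 before dividing by the clock
 * period, so e.g. ENOUGH(165000, 30000) = (165000 - 1)/30000 + 1 = 6,
 * the smallest clock count whose total duration covers the requirement.
 * EZ() additionally maps a zero ("don't care") value to zero clocks
 * instead of rounding it up to one.
 */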
static void ata_timing_quantize(const struct ata_timing *t,
				struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
	const struct ata_timing *t = ata_timing;

	while (xfer_mode > t->mode)
		t++;

	if (xfer_mode == t->mode)
		return t;
	return NULL;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find base mode */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
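/* Worked example: for a device with pio_mask 0x1f (PIO0-4),
 * ATA_DNXFER_PIO computes highbit = fls(0x1f) - 1 = 4 and clears it,
 * leaving 0x0f, i.e. the top speed drops from PIO4 to PIO3.  A caller
 * such as ata_bus_probe() above then applies the reduced mask through
 * ata_set_mode() on the retry path.
 */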
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		ign_dev_err = 1;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		ign_dev_err = 1;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		if (!ata_dev_enabled(dev))
			continue;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode != 0xff)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		if (dev->pio_mode == 0xff) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(100);
		}
	}
}
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
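/* SControl decoding for the writes above (per the SATA spec): bits 3:0
 * are DET and bits 11:8 are IPM.  0x301 sets DET = 1 (issue COMRESET)
 * with IPM = 3 (transitions to partial/slumber power states disabled);
 * 0x304 sets DET = 4 (take the PHY offline).  In both cases the speed
 * bits 7:4 are preserved by the "& 0x0f0" mask.
 */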
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);
	link->eh_info.serror = 0;

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A",	NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	ATA_HORKAGE_IVB },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	ATA_HORKAGE_IVB },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	ATA_HORKAGE_IVB },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	ATA_HORKAGE_IVB },

	/* End Marker */
	{ }
};
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else {
		len = strlen(name);
		if (!len) {
			if (!*patt)
				return 0;
			return -1;
		}
	}

	return strncmp(patt, name, len);
}
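/* Example: strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') finds the
 * trailing wildcard at offset 7 and compares only the first 7 bytes
 * ("Maxtor "), so any Maxtor model string matches the blacklist entry
 * above.  Without a trailing wildcard the whole name is compared.
 */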
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
/**
 *	ata_is_40wire		-	check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation.
 */
static int ata_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		    (ap->cbl == ATA_CBL_PATA_UNK ||
		     ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}

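/*
 * Illustrative only, not part of libata: the packed xfermask layout used
 * above.  PIO, MWDMA and UDMA capability bits share one unsigned long at
 * fixed shifts, so clearing 0xF8 << ATA_SHIFT_UDMA removes UDMA3 and above
 * (everything faster than UDMA/33), which is exactly the 40-wire cable
 * clamp.  Guarded out of the build.
 */
#if 0
static void xfermask_clamp_example(struct ata_port *ap)
{
	unsigned long xfer_mask = ata_pack_xfermask(ap->pio_mask,
						    ap->mwdma_mask,
						    ap->udma_mask);

	/* 40-wire cable rule: keep only UDMA0-2 */
	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	ata_unpack_xfermask(xfer_mask, &ap->pio_mask,
			    &ap->mwdma_mask, &ap->udma_mask);
}
#endif
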
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count set to indicate the feature.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}

/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

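/*
 * Illustrative only, not part of libata: the 64K-boundary split performed
 * by ata_fill_sg().  A segment starting at offset 0xFFF0 within a 64K
 * window with length 0x20 yields two PRD entries: 0x10 bytes up to the
 * boundary, then 0x10 bytes after it.  Guarded out of the build.
 */
#if 0
static void prd_split_example(void)
{
	u32 addr = 0xFFF0, sg_len = 0x20, offset, len;

	offset = addr & 0xffff;			/* 0xFFF0 */
	len = sg_len;
	if ((offset + sg_len) > 0x10000)
		len = 0x10000 - offset;		/* first PRD: 0x10 bytes */

	/* the loop's next iteration emits (addr + len, sg_len - len) */
	BUG_ON(len != 0x10);
}
#endif
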
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *               nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}

/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}

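/*
 * Illustrative only, not part of libata: the exclusion rule enforced by
 * ata_std_qc_defer().  NCQ commands may overlap other NCQ commands; a
 * non-NCQ command requires the link to be completely idle, so both
 * active_tag and the sactive mask must be clear.  Guarded out of the
 * build.
 */
#if 0
static int qc_defer_example(struct ata_link *link, int is_ncq)
{
	if (is_ncq)
		/* ok unless a non-NCQ command currently owns active_tag */
		return ata_tag_valid(link->active_tag) ? ATA_DEFER_LINK : 0;

	/* non-NCQ: nothing at all may be in flight on this link */
	return (ata_tag_valid(link->active_tag) || link->sactive) ?
		ATA_DEFER_LINK : 0;
}
#endif
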
/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

/**
 *	ata_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}

/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

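/*
 * Illustrative only, not part of libata: the typical caller of the helper
 * above.  IDENTIFY data arrives as little-endian 16-bit words, so on a
 * big-endian CPU the whole page must be swapped in place before any field
 * can be read; on little-endian the call compiles to nothing.  Guarded
 * out of the build.
 */
#if 0
static void swap_buf_le16_example(u16 *id)
{
	/* ATA_ID_WORDS == 256: swap the entire IDENTIFY page */
	swap_buf_le16(id, ATA_ID_WORDS);
}
#endif
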
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
			   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}

	return words << 1;
}

/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size,
				   do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size,
				   do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}

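/*
 * Illustrative only, not part of libata: sectors moved per DRQ block.
 * For READ/WRITE MULTIPLE the device expects up to multi_count sectors
 * per data-request interrupt, capped by what remains of the command;
 * plain PIO moves exactly one sector per DRQ.  Guarded out of the build.
 */
#if 0
static unsigned int drq_block_sectors_example(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf))
		return min((qc->nbytes - qc->curbytes) / qc->sect_size,
			   qc->dev->multi_count);
	return 1;
}
#endif
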
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	default:
		break;
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

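/*
 * Illustrative only, not part of libata: the ATAPI interrupt-reason
 * decoding used above.  Bit 0 (CoD) must be clear for a data phase,
 * bit 1 (IO) gives the direction (clear means the host writes to the
 * device), and the byte count arrives split across the LBA mid/high
 * registers.  Guarded out of the build.
 */
#if 0
static void ireason_decode_example(const struct ata_taskfile *tf)
{
	unsigned int ireason = tf->nsect;
	unsigned int bytes = (tf->lbah << 8) | tf->lbam;
	int is_data_phase = !(ireason & (1 << 0));
	int host_writes = ((ireason & (1 << 1)) == 0);

	(void)bytes;
	(void)is_data_phase;
	(void)host_writes;
}
#endif
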
/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}

static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with the command
 *
 *	LOCKING:
 *	None.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

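/*
 * Illustrative only, not part of libata: qc tags are claimed from the
 * qc_allocated bitmap with test_and_set_bit(), so allocation is atomic
 * without taking the host lock, and tag ATA_MAX_QUEUE - 1 is never handed
 * out here because it is reserved for internal commands.  Guarded out of
 * the build.
 */
#if 0
static int qc_tag_alloc_example(struct ata_port *ap)
{
	unsigned int i;

	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated))
			return i;	/* tag i now belongs to the caller */
	return -1;			/* all regular tags are busy */
}
#endif
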
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}

static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (ata_tag_internal(qc->tag))
		return;

	if (ata_is_nodata(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}

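/*
 * Illustrative only, not part of libata: how done_mask is derived above.
 * XOR of the old and new active masks yields the tags that changed state;
 * a changed tag that is still set in the new mask would mean a command
 * appeared rather than completed, which is an illegal transition for this
 * helper.  Guarded out of the build.
 */
#if 0
static void done_mask_example(void)
{
	u32 old_active = 0x0000000f;	/* tags 0-3 in flight */
	u32 new_active = 0x00000005;	/* tags 0 and 2 still in flight */
	u32 done_mask = old_active ^ new_active;	/* 0xa: tags 1, 3 done */

	BUG_ON(done_mask & new_active);	/* would be an illegal transition */
}
#endif
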
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to a device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}

/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATAPI_PROT_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
		ap->ops->bmdma_start(qc);	 /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_pio_queue_task(ap, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_pio_queue_task(ap, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_pio_queue_task(ap, qc, 0);
		break;

	case ATAPI_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_pio_queue_task(ap, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATAPI_PROT_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@link: ATA link to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @link.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_read(ap, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_read(link, reg, val);
}

/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_write(ap, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}

/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;
		int rc;

		if (sata_scr_valid(link)) {
			rc = ap->ops->scr_write(ap, reg, val);
			if (rc == 0)
				rc = ap->ops->scr_read(ap, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}

/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

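/*
 * Illustrative only, not part of libata: the SStatus DET field consulted
 * by ata_link_online()/ata_link_offline().  DET occupies bits 3:0 of
 * SStatus and reads 0x3 when a device is present and phy communication is
 * established; any read failure means the answer is simply unknown.
 * Guarded out of the build.
 */
#if 0
static int sstatus_det_example(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return -1;		/* SCRs not accessible */
	return (sstatus & 0xf) == 0x3;	/* 1: phy link established */
}
#endif
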
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
#endif

/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	return 0;
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}

/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_spd_limit(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}

/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}

static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}

/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}

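/*
 * Illustrative only, not part of libata: the usual LLD sequence built on
 * the helpers above - allocate a host from port_info, start it, then
 * register it.  Error handling is devres-based, so a failed probe only
 * needs to release the devres group; the example function, its parameters
 * and the single-port count are hypothetical.  Guarded out of the build.
 */
#if 0
static int lld_probe_example(struct device *dev,
			     const struct ata_port_info * const *ppi,
			     struct scsi_host_template *sht)
{
	struct ata_host *host;
	int rc;

	host = ata_host_alloc_pinfo(dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	return ata_host_register(host, sht);
}
#endif
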
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}

/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closest ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}

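/*
 * Illustrative only, not part of libata: what finalization does to an
 * inheriting ops table.  Any slot left unset is filled from the nearest
 * ancestor that provides it; ATA_OP_NULL survives the inheritance walk
 * and is then cleared to NULL, letting a child explicitly drop a method.
 * The example ops table below is hypothetical.  Guarded out of the build.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_sff_port_ops,	/* take SFF defaults */
	.cable_detect	= ata_cable_40wire,	/* override one method */
};
#endif
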
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}

/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask |= ATA_ALL_DEVICES;
			ehi->action |= ATA_EH_RESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
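
/*
 * Example (an illustrative sketch; foo_* names and the setup details
 * are hypothetical): the canonical PCI LLD probe path built from the
 * helpers above.
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	// ... ioremap BARs, fill host->ports[i]->ioaddr, etc. ...
 *	pci_set_master(pdev);
 *	return ata_host_activate(host, pdev->irq, ata_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 *
 * Passing irq == 0 with a NULL @irq_handler skips IRQ registration
 * for polling-only hosts, as described above.
 */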
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
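
/*
 * Example (an illustrative sketch; the legacy primary-channel
 * addresses and devm_ioport_map() use are assumptions): setting up a
 * legacy-IO SFF port, with only cmd_addr and ctl_addr filled by hand.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = devm_ioport_map(dev, 0x1f0, 8);
 *	ioaddr->ctl_addr = devm_ioport_map(dev, 0x3f6, 1);
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr;
 *	ata_std_ports(ioaddr);	// data at +0, error at +1, ... cmd at +7
 */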
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
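
/*
 * Example (an illustrative sketch; register 0x41 and the mask/val
 * pairs are hypothetical): IDE-era drivers use this helper to check
 * whether a channel's I/O decode is enabled before touching it.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },   // reg, width in bytes, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;	// channel disabled
 */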
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);

	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
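
/*
 * Example (an illustrative sketch; the foo_* names are hypothetical):
 * simple LLDs wire these helpers straight into their pci_driver, while
 * drivers needing controller-specific work call the do_* variants
 * around their own suspend/resume code instead.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */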
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* FIXME: Currently, there's no way to tag init const data and
	 * using __initdata causes build failure on some versions of
	 * gcc.  Once __initdataconst is implemented, add const to the
	 * following structure.
	 */
	static struct ata_force_param force_tbl[] __initdata = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match wins over any number of prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
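
/*
 * Example (an illustrative sketch derived from the parser above):
 * force-parameter strings as passed via the libata.force module
 * parameter, in [PORT[.DEVICE]:]VAL form.
 *
 *	libata.force=3.0Gbps			apply to all ports
 *	libata.force=1:noncq			port 1, all devices
 *	libata.force=2.1:udma/33		port 2, device 1
 *	libata.force=1:noncq,2.1:udma/33	comma-separated list
 *
 * Abbreviations are accepted as long as they are unambiguous, so
 * "1.5" matches "1.5Gbps".
 */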
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n", cur, reason);
			continue;
		}

		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;

	ata_parse_force_param();

	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		goto free_force_tbl;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		goto free_force_tbl;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

 free_force_tbl:
	kfree(ata_force_tbl);
	return -ENOMEM;
}
static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
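
/*
 * Example (an illustrative sketch): callers use this to throttle
 * messages that an error storm could otherwise emit on every command.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"lost interrupt (Status 0x%x)\n", status);
 *
 * At most one message every HZ/5 jiffies (200ms) gets through; the
 * rest are silently dropped.
 */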
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
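
/*
 * Example (an illustrative sketch; FOO_STATUS and FOO_BUSY are
 * hypothetical): poll a 32-bit status register until its BUSY bit
 * clears, checking every 10ms for up to 500ms.
 *
 *	u32 status = ata_wait_register(mmio + FOO_STATUS,
 *				       FOO_BUSY, FOO_BUSY, 10, 500);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;	// still busy - timed out
 *
 * Note the sense of @val: the wait continues while (read & mask) ==
 * val, i.e. this call returns once BUSY is no longer set.
 */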
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return 0;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
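
/*
 * Example (an illustrative sketch; foo_port_info is hypothetical): a
 * controller with a dead or absent second channel can still register
 * a fixed-size host by plugging the dummy port_info into that slot.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *	host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * The dummy port fails all issued commands with AC_ERR_SYSTEM and is
 * reported as "DUMMY" at registration time.
 */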
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_noop_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_error_handler);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);