 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#include <asm/semaphore.h>
#include <asm/byteorder.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };
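/*
 * Usage sketch (illustrative; assumes the sata_link_resume() helper from
 * this library): reset paths hand one of the tables above to the debounce
 * helpers, which poll SStatus every @interval msecs until the DET field
 * has held steady for @duration msecs, giving up after @timeout msecs:
 *
 *	const unsigned long *timing = sata_deb_timing_hotplug;
 *
 *	rc = sata_link_resume(link, timing, deadline);
 */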
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
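/*
 * Example (sketch): a SATA LLDD typically builds the 20-byte Host-to-Device
 * FIS for a command straight into its command table.  For port multiplier
 * port 0 with the command bit set:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, 0, 1, fis);
 *	// fis[0] == 0x27 (FIS type), fis[1] == 0x80 (C bit, PMP 0),
 *	// fis[2] == qc->tf.command
 */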
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
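/*
 * Worked example for the CHS branch above (hypothetical geometry): with
 * dev->heads == 16 and dev->sectors == 63, a taskfile carrying cyl == 2,
 * head == 3, sect == 4 yields
 *
 *	block = (2 * 16 + 3) * 63 + 4 = 2209
 */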
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
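/*
 * Usage sketch (illustrative): the read/write translation path fills a
 * taskfile for, say, a 16-sector write at block 1000 like this; @tag only
 * matters when NCQ is enabled:
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, 1000, 16,
 *			     ATA_TFLAG_WRITE, qc->tag);
 *	if (rc)
 *		return rc;	// -ERANGE: too large, -EINVAL: bad request
 */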
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
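/*
 * Worked example (bit layout assumed for illustration, matching the
 * 20-entry xfer_mode_str[] below: PIO in bits 0-6, MWDMA in bits 7-11,
 * UDMA in bits 12-19): a device doing PIO0-4 and UDMA0-5 packs as
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0, 0x3f);
 *
 * and ata_unpack_xfermask(xfer_mask, &pio, NULL, &udma) recovers the
 * original 0x1f and 0x3f, ignoring the NULL mwdma destination.
 */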
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
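/*
 * Worked example: for XFER_UDMA_5 the table walk above lands in the UDMA
 * entry, so
 *
 *	ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA
 *	ata_xfer_mode2mask(XFER_UDMA_5)  == 1 << (ATA_SHIFT_UDMA + 5)
 *
 * and feeding that mask back through ata_xfer_mask2mode() yields
 * XFER_UDMA_5 again, since only the highest set bit is considered.
 */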
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66",
		"UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */
	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
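/*
 * Signature reference for the checks above:
 *
 *	lbam 0x00, lbah 0x00  ->  ATA_DEV_ATA
 *	lbam 0x3c, lbah 0xc3  ->  ATA_DEV_ATA
 *	lbam 0x14, lbah 0xeb  ->  ATA_DEV_ATAPI
 *	lbam 0x69, lbah 0x96  ->  ATA_DEV_ATAPI
 *	anything else         ->  ATA_DEV_UNKNOWN
 */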
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
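/*
 * Example (hypothetical IDENTIFY contents): the product field is
 * space-padded on the device, so
 *
 *	char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
 *
 * turns "ST3250620NS             " into the NUL-terminated "ST3250620NS".
 */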
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
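/*
 * Worked example: READ NATIVE MAX ADDRESS returns the highest addressable
 * LBA in the taskfile; the helpers above reassemble it, and the trailing
 * ++sectors converts that last LBA into a sector count.  E.g. an LBA48
 * result of 0x123456 gives 0x123456 + 1 == 1193047 sectors.
 */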
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 *	ata_id_to_dma_mode - Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode. This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  93==0x%04x\n", id[88], id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
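/*
 * Usage sketch (illustrative): EH and suspend paths issue register-only
 * commands this way, e.g. flushing the write cache before power-down:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		// inspect AC_ERR_* bits; ATA_CMD_FLUSH_EXT for LBA48 drives
 */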
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
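/*
 * Usage sketch (illustrative; foo_port_ops is hypothetical): a LLDD with
 * no real cable detection simply wires one of these helpers into its port
 * operations:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		...
 *		.cable_detect	= ata_cable_40wire,
 *		...
 *	};
 */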
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	u32 spd, limit;

	if (link->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(link->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(link, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
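/*
 * Editor's note: each row above packs, in order, { mode, setup, act8b,
 * rec8b, cyc8b, active, recover, cycle, udma } -- the same fields that
 * ata_timing_quantize() below converts one by one.  A designated-
 * initializer rendering of the XFER_PIO_4 row (illustration only; the
 * field order is inferred from the code below, not quoted from a header):
 */
#if 0	/* example only */
static const struct ata_timing pio4_example = {
	.mode	 = XFER_PIO_4,
	.setup	 = 25,		/* address setup, ns */
	.act8b	 = 70,		/* 8-bit (taskfile) active, ns */
	.rec8b	 = 25,		/* 8-bit recovery, ns */
	.cyc8b	 = 120,		/* 8-bit cycle, ns */
	.active	 = 70,		/* data active, ns */
	.recover = 25,		/* data recovery, ns */
	.cycle	 = 120,		/* data cycle, ns */
	.udma	 = 0,		/* no UDMA timing for PIO modes */
};
#endif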
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t,
				struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(link, r_failed_dev);
	return ata_do_set_mode(link, r_failed_dev);
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  @timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A",	NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA ZIP 250 ATAPI", NULL,		ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA ZIP 250 ATAPI Floppy",
				NULL,		ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,	ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },

	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* End Marker */
	{ }
};
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else {
		len = strlen(name);
		if (!len)
			return 0;
	}

	return strncmp(patt, name, len);
}
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
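/*
 * Editor's illustration (not part of the driver): how the trailing-'*'
 * pattern compare behaves.  "Maxtor *" matches any model beginning
 * with "Maxtor " because only the bytes before the wildcard are
 * compared, and an entry with a NULL revision matches every firmware
 * revision of that model.  Compiles standalone as a userspace sketch.
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>

static int pattern_cmp(const char *patt, const char *name)
{
	const char *p = strchr(patt, '*');
	size_t len = (p && !p[1]) ? (size_t)(p - patt) : strlen(name);

	return len ? strncmp(patt, name, len) : 0;
}

int main(void)
{
	printf("%d\n", pattern_cmp("Maxtor *", "Maxtor 7V300F0"));
	/* prints 0: the prefix before '*' matches */
	return 0;
}
#endif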
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count set to indicate Asynchronous
 *	Notification feature
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = SATA_AN;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
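/*
 * Editor's illustration (not part of the driver): the 64K-boundary
 * split performed above, in userspace.  A segment at 0x1f000 of length
 * 0x3000 crosses a 64K boundary and becomes two PRD entries.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x1f000, sg_len = 0x3000;

	while (sg_len) {
		unsigned int offset = addr & 0xffff;
		unsigned int len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;
		printf("PRD: addr=0x%x len=0x%x\n", addr, len);
		/* prints 0x1f000/0x1000 then 0x20000/0x2000 */
		addr += len;
		sg_len -= len;
	}
	return 0;
}
#endif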
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}
/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}
/**
 *	ata_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
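/*
 * Editor's illustration (not part of the driver): what le16_to_cpu()
 * amounts to on a big-endian CPU -- an explicit byte swap.  On
 * little-endian machines the loop above compiles away entirely.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned short w = 0x3412;
	unsigned short swapped = (unsigned short)((w << 8) | (w >> 8));

	printf("0x%04x -> 0x%04x\n", w, swapped);	/* 0x3412 -> 0x1234 */
	return 0;
}
#endif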
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}
}
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to the device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@link: ATA link to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @link.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	struct ata_port *ap = link->ap;

	if (sata_scr_valid(link))
		return ap->ops->scr_read(ap, reg, val);
	return -EOPNOTSUPP;
}
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;

	if (sata_scr_valid(link))
		return ap->ops->scr_write(ap, reg, val);
	return -EOPNOTSUPP;
}
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;
	int rc;

	if (sata_scr_valid(link)) {
		rc = ap->ops->scr_write(ap, reg, val);
		if (rc == 0)
			rc = ap->ops->scr_read(ap, reg, &val);
		return rc;
	}
	return -EOPNOTSUPP;
}
/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}
/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
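
/* Illustrative sketch, not part of the original file: decoding the link
 * speed from SStatus with the SCR helpers above.  Checking the return
 * value matters because PATA links report -EOPNOTSUPP.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		spd = (sstatus >> 4) & 0xf;	// 1 = 1.5 Gbps, 2 = 3.0 Gbps
 */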
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif /* CONFIG_PM */
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
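
/* Illustrative sketch, not part of the original file: the usual PCI LLD
 * probe sequence built on the helpers above.  foo_port_info, foo_sht and
 * the BAR mapping step are hypothetical driver pieces.
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	... map BARs and fill host->ports[i]->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, ata_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 */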
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
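
/* Illustrative sketch, not part of the original file: IDE-era PCI drivers
 * use pci_test_config_bits() to check a channel's "I/O enable" bit before
 * touching it.  The pci_bits contents below (reg, width, mask, val) are
 * hypothetical.
 *
 *	static const struct pci_bits foo_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;	// channel disabled by firmware
 */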
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
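
/* Illustrative sketch, not part of the original file: ata_ratelimit() is
 * meant to gate chatty diagnostics in hot paths, e.g.:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (stat 0x%x)\n", status);
 */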
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
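
/* Illustrative sketch, not part of the original file: waiting for a
 * hypothetical controller to clear its BUSY bit, polling every 10 ms for
 * up to 500 ms.  Passing val == mask makes the loop spin while the bit
 * stays set.
 *
 *	tmp = ata_wait_register(port_mmio + FOO_STAT, FOO_STAT_BUSY,
 *				FOO_STAT_BUSY, 10, 500);
 *	if (tmp & FOO_STAT_BUSY)
 *		... timed out, controller still busy ...
 */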
/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return 0;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7115 const struct ata_port_operations ata_dummy_port_ops
= {
7116 .check_status
= ata_dummy_check_status
,
7117 .check_altstatus
= ata_dummy_check_status
,
7118 .dev_select
= ata_noop_dev_select
,
7119 .qc_prep
= ata_noop_qc_prep
,
7120 .qc_issue
= ata_dummy_qc_issue
,
7121 .freeze
= ata_dummy_noret
,
7122 .thaw
= ata_dummy_noret
,
7123 .error_handler
= ata_dummy_noret
,
7124 .post_internal_cmd
= ata_dummy_qc_noret
,
7125 .irq_clear
= ata_dummy_noret
,
7126 .port_start
= ata_dummy_ret0
,
7127 .port_stop
= ata_dummy_noret
,
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
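
/*
 * Illustrative sketch (not part of the original file): a driver for a
 * controller whose second channel is absent or disabled can hand
 * &ata_dummy_port_info to ata_host_alloc_pinfo() for that slot; the
 * dummy ops fail every qc_issue with AC_ERR_SYSTEM, so nothing is ever
 * really sent to the missing port.  "my_port_info" is a hypothetical
 * driver-defined ata_port_info.
 */
#if 0	/* example only, never compiled */
	const struct ata_port_info *ppi[] =
		{ &my_port_info, &ata_dummy_port_info };
	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
#endif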
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);