/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#define DRV_VERSION	"2.20"	/* must be exactly four chars */

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };
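/*
 * Illustrative usage (assumed semantics, values taken from the tables above):
 * each tuple is consumed as { interval, duration, timeout } in milliseconds
 * by an SCR debounce loop, e.g.
 *
 *	const unsigned long *p = sata_deb_timing_hotplug;
 *	unsigned long interval = p[0], duration = p[1], timeout = p[2];
 *
 * i.e. poll every 'interval' ms until the PHY reading has been stable for
 * 'duration' ms, giving up after 'timeout' ms.
 */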
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
}
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
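/*
 * Worked example for the CHS branch above: with dev->heads == 16 and
 * dev->sectors == 63, a taskfile holding cyl == 2, head == 5 and
 * sect == 20 yields block = (2 * 16 + 5) * 63 + 20 = 2351.
 */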
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit. */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
	}

	return 0;
}
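/*
 * Worked example for the LBA48 register split above: block 0x0123456789ab
 * is scattered one byte per register, high byte first: hob_lbah = 0x01,
 * hob_lbam = 0x23, hob_lbal = 0x45, lbah = 0x67, lbam = 0x89, lbal = 0xab.
 */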
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
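/*
 * Illustrative packing example (assuming the usual ATA_SHIFT_PIO=0,
 * ATA_SHIFT_MWDMA=5, ATA_SHIFT_UDMA=8 layout): pio_mask = 0x1f (PIO0-4),
 * mwdma_mask = 0x07 (MWDMA0-2) and udma_mask = 0x3f (UDMA0-5) pack into
 * (0x3f << 8) | (0x07 << 5) | 0x1f == 0x3fff.
 */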
/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
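/*
 * Illustrative walk-through (assuming ATA_SHIFT_UDMA=8): for
 * xfer_mask == 0x3f00 (UDMA0-5), fls() returns 14 so highbit == 13;
 * the UDMA table entry matches and the result is
 * XFER_UDMA_0 + 13 - 8 == XFER_UDMA_5.
 */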
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @modemask).
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		/* one human-readable name per XFER_* mode */
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
	}
}
/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
/**
 * ata_read_native_max_address_ext - LBA48 native max query
 * @dev: Device to query
 *
 * Perform an LBA48 size query upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */
static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}
/**
 * ata_read_native_max_address - LBA28 native max query
 * @dev: Device to query
 *
 * Perform an LBA28 size query upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */
static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}
/**
 * ata_set_native_max_address_ext - LBA48 native max set
 * @dev: Device to query
 *
 * Perform an LBA48 size set max upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */
static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}
/**
 * ata_set_native_max_address - LBA28 native max set
 * @dev: Device to query
 *
 * Perform an LBA28 size set max upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */
static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}
/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 */
static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	/* if no hpa, both should be equal */
	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, hpa_sectors = %lld\n",
		       __FUNCTION__, sectors, hpa_sectors);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			sectors, hpa_sectors);

		if (ata_ignore_hpa) {
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev, hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO,
					"native size increased to %lld sectors\n", hpa_sectors);
				return hpa_sectors;
			}
		}
	}

	return sectors;
}
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
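/*
 * IDENTIFY words referenced above: words 100-103 hold the 48-bit LBA48
 * sector count, words 60-61 the LBA28 count, words 57-58 the current CHS
 * capacity, and words 1/3/6 the default cylinders, heads and sectors per
 * track, multiplied together as a last resort.
 */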
/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	ap->ops->dev_select(ap, device);

	if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
		msleep(150);
}
/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x ", id[49]);
	DPRINTK("80==0x%04x ", id[80]);
	DPRINTK("88==0x%04x ", id[88]);
}
/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}
/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc)
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			       rc);

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);
		dev->n_sectors_boot = dev->n_sectors;

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			if (ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	/* limit ATAPI DMA to R/W commands only */
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 * ata_cable_40wire - return 40 wire cable type
 * @ap: port
 *
 * Helper method for drivers which want to hardwire 40 wire cable
 * detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 * ata_cable_80wire - return 80 wire cable type
 * @ap: port
 *
 * Helper method for drivers which want to hardwire 80 wire cable
 * detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 * ata_cable_unknown - return unknown PATA cable.
 * @ap: port
 *
 * Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 * ata_cable_sata - return SATA cable type
 * @ap: port
 *
 * Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host lock, or some other form of
 * serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 * ata_dev_pair - return other device on cable
 * @adev: device
 *
 * Obtain the other device on the same cable, or if none is
 * present NULL is returned.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host lock, or some other form of
 * serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 * sata_down_spd_limit - adjust SATA spd limit downward
 * @ap: Port to adjust SATA spd limit for
 *
 * Adjust SATA spd limit of @ap downward.  Note that this
 * function only adjusts the limit.  The change must be applied
 * using sata_set_spd().
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 * sata_set_spd_needed - is SATA spd configuration needed
 * @ap: Port in question
 *
 * Test whether the spd limit in SControl matches
 * @ap->sata_spd_limit.  This function is used to determine
 * whether hardreset is necessary to apply SATA spd
 * configuration.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}

/**
 * sata_set_spd - set SATA spd according to spd limit
 * @ap: Port to set SATA spd for
 *
 * Set SATA spd of @ap according to sata_spd_limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 if spd doesn't need to be changed, 1 if spd has been
 * changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
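/* Worked example (illustrative, assuming T and UT are the bus clock period
 * in picoseconds, e.g. T = 1000000000 / 33000 = 30303 ps for a 33 MHz
 * clock): a PIO-4 cycle of 120 ns quantizes to
 * EZ(120 * 1000, 30303) = ENOUGH(120000, 30303) = (120000 - 1)/30303 + 1
 * = 4 clock periods, i.e. the timing is always rounded *up* so the device
 * is never driven faster than the nanosecond figure in ata_timing[].
 */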
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
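/* Worked example (illustrative only): for PIO-4 the table gives
 * act8b = 70, rec8b = 25, cyc8b = 120 (ns).  Since 70 + 25 = 95 < 120,
 * the lengthening step above adds (120 - 95) / 2 = 12 to act8b (-> 82)
 * and sets rec8b = 120 - 82 = 38, so active + recovery exactly fill the
 * commanded cycle time.  The same adjustment is applied to the 16-bit
 * active/recover/cycle triple.
 */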
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
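/* Illustrative example (not part of the driver): a drive currently allowed
 * up to UDMA/5 has udma_mask = 0x3f.  ATA_DNXFER_DMA clears the highest set
 * bit, fls(0x3f) - 1 = 5, leaving 0x1f, i.e. UDMA/4 becomes the new ceiling.
 * ATA_DNXFER_40C instead masks udma_mask with ATA_UDMA_MASK_40C so only the
 * modes that are safe on a 40-wire cable survive.  ata_set_mode() must be
 * called afterwards for any of this to take effect on the hardware.
 */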
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);
	return ata_do_set_mode(ap, r_failed_dev);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
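/* Illustrative example (not part of the driver): with the "normal" debounce
 * parameters { 5, 100, 2000 } the loop above polls SStatus every 5 ms,
 * declares the link stable once DET has held the same non-1 value for
 * 100 ms, and gives up (or, for a stuck DET=1, returns 0) after 2000 ms.
 * The hotplug and "long" parameter sets simply stretch those figures.
 */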
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}

static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}

/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
	u64 new_n_sectors;

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)dev->n_sectors,
			       (unsigned long long)new_n_sectors);
		/* Are we the boot time size - if so we appear to be the
		   same disk at this point and our HPA got reapplied */
		if (ata_ignore_hpa && dev->n_sectors_boot == new_n_sectors
		    && ata_id_hpa_enabled(new_id))
			return 1;
		return 0;
	}

	return 1;
}
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 |
						ATA_HORKAGE_DMA_RW_ONLY },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};
unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strcmp(ad->model_num, model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strcmp(ad->model_rev, model_rev))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
}
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *               nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int rc = 0; /* Assume ATAPI DMA is OK by default */

	/* some drives can only do ATAPI DMA on read/write */
	if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
		struct scsi_cmnd *cmd = qc->scsicmd;
		u8 *scsicmd = cmd->cmnd;

		switch (scsicmd[0]) {
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:
		case READ_6:
		case WRITE_6:
			/* atapi dma maybe ok */
			break;

		default:
			/* turn off atapi dma */
			return 1;
		}
	}

	if (ap->ops->check_atapi_dma)
		rc = ap->ops->check_atapi_dma(qc);

	return rc;
}

/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */

static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}
4693 * ata_hsm_move - move the HSM to the next state.
4694 * @ap: the target ata_port
4696 * @status: current device status
4697 * @in_wq: 1 if called from workqueue, 0 otherwise
4700 * 1 when poll next status needed, 0 otherwise.
4702 int ata_hsm_move(struct ata_port
*ap
, struct ata_queued_cmd
*qc
,
4703 u8 status
, int in_wq
)
4705 unsigned long flags
= 0;
4708 WARN_ON((qc
->flags
& ATA_QCFLAG_ACTIVE
) == 0);
4710 /* Make sure ata_qc_issue_prot() does not throw things
4711 * like DMA polling into the workqueue. Notice that
4712 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4714 WARN_ON(in_wq
!= ata_hsm_ok_in_wq(ap
, qc
));
4717 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4718 ap
->print_id
, qc
->tf
.protocol
, ap
->hsm_task_state
, status
);
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;
	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		break;
	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
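
/* Illustrative note on the two entry points of ata_hsm_move(): the
 * interrupt path runs it with in_wq=0 while holding the host lock, and
 * ata_pio_task() runs it with in_wq=1 from the polling workqueue,
 * continuing to poll while the return value is non-zero.  The snippet
 * below is a non-compiled sketch of that contract, not additional
 * libata code.
 */
#if 0
	/* irq context, host lock held, e.g. from ata_host_intr() */
	ata_hsm_move(ap, qc, status, 0);

	/* workqueue context, no lock held on entry, e.g. from ata_pio_task() */
	poll_next = ata_hsm_move(ap, qc, status, 1);
#endif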
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
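
/* Non-compiled sketch of how this file schedules the PIO polling task:
 * immediately when the next HSM step should run right away, or after
 * ATA_SHORT_PAUSE when the device is still busy.  Purely illustrative,
 * mirroring the calls already used in this function.
 */
#if 0
	ata_port_queue_task(ap, ata_pio_task, qc, 0);			/* run asap */
	ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);	/* back off */
#endif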
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
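
/* The qc tag bitmap is managed with atomic bit operations: ata_qc_new()
 * claims the first free tag with test_and_set_bit() and ata_qc_free()
 * releases it with clear_bit().  A minimal, non-compiled sketch of that
 * pairing (names as in this file):
 */
#if 0
	if (!test_and_set_bit(tag, &ap->qc_allocated))
		qc = __ata_qc_from_tag(ap, tag);	/* tag is now owned */

	/* ... use qc; later, on ata_qc_free(): */
	clear_bit(tag, &ap->qc_allocated);		/* tag is free again */
#endif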
5000 * ata_qc_new_init - Request an available ATA command, and initialize it
5001 * @dev: Device from whom we request an available command structure
5007 struct ata_queued_cmd
*ata_qc_new_init(struct ata_device
*dev
)
5009 struct ata_port
*ap
= dev
->ap
;
5010 struct ata_queued_cmd
*qc
;
5012 qc
= ata_qc_new(ap
);
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}
5085 * ata_qc_complete - Complete an active ATA command
5086 * @qc: Command to complete
5087 * @err_mask: ATA Status register contents
5089 * Indicate to the mid and upper layers that an ATA
5090 * command has completed, with either an ok or not-ok status.
5093 * spin_lock_irqsave(host lock)
5095 void ata_qc_complete(struct ata_queued_cmd
*qc
)
5097 struct ata_port
*ap
= qc
->ap
;
5099 /* XXX: New EH and old EH use different mechanisms to
5100 * synchronize EH with regular execution path.
5102 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5103 * Normal execution path is responsible for not accessing a
5104 * failed qc. libata core enforces the rule by returning NULL
5105 * from ata_qc_from_tag() for failed qcs.
5107 * Old EH depends on ata_qc_complete() nullifying completion
5108 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5109 * not synchronize with interrupt handler. Only PIO task is
5112 if (ap
->ops
->error_handler
) {
5113 WARN_ON(ap
->pflags
& ATA_PFLAG_FROZEN
);
5115 if (unlikely(qc
->err_mask
))
5116 qc
->flags
|= ATA_QCFLAG_FAILED
;
5118 if (unlikely(qc
->flags
& ATA_QCFLAG_FAILED
)) {
5119 if (!ata_tag_internal(qc
->tag
)) {
5120 /* always fill result TF for failed qc */
5122 ata_qc_schedule_eh(qc
);
5127 /* read result TF if requested */
5128 if (qc
->flags
& ATA_QCFLAG_RESULT_TF
)
5131 __ata_qc_complete(qc
);
5133 if (qc
->flags
& ATA_QCFLAG_EH_SCHEDULED
)
5136 /* read result TF if failed or requested */
5137 if (qc
->err_mask
|| qc
->flags
& ATA_QCFLAG_RESULT_TF
)
5140 __ata_qc_complete(qc
);
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
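
/* Typical use of ata_qc_complete_multiple(): an NCQ-capable LLD reads a
 * "commands still active" mask from its controller and passes it here;
 * the XOR against ap->qc_active above determines which tags finished.
 * The register name below is hypothetical; non-compiled sketch only.
 */
#if 0
	u32 hw_active = readl(port_mmio + PORT_CMD_ACTIVE);	/* hypothetical */

	ata_qc_complete_multiple(ap, hw_active, NULL);
#endif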
5193 static inline int ata_should_dma_map(struct ata_queued_cmd
*qc
)
5195 struct ata_port
*ap
= qc
->ap
;
5197 switch (qc
->tf
.protocol
) {
5200 case ATA_PROT_ATAPI_DMA
:
5203 case ATA_PROT_ATAPI
:
5205 if (ap
->flags
& ATA_FLAG_PIO_DMA
)
5218 * ata_qc_issue - issue taskfile to device
5219 * @qc: command to issue to device
5221 * Prepare an ATA command to submission to device.
5222 * This includes mapping the data into a DMA-able
5223 * area, filling in the S/G table, and finally
5224 * writing the taskfile to hardware, starting the command.
5227 * spin_lock_irqsave(host lock)
5229 void ata_qc_issue(struct ata_queued_cmd
*qc
)
5231 struct ata_port
*ap
= qc
->ap
;
5233 /* Make sure only one non-NCQ command is outstanding. The
5234 * check is skipped for old EH because it reuses active qc to
5235 * request ATAPI sense.
5237 WARN_ON(ap
->ops
->error_handler
&& ata_tag_valid(ap
->active_tag
));
5239 if (qc
->tf
.protocol
== ATA_PROT_NCQ
) {
5240 WARN_ON(ap
->sactive
& (1 << qc
->tag
));
5241 ap
->sactive
|= 1 << qc
->tag
;
5243 WARN_ON(ap
->sactive
);
5244 ap
->active_tag
= qc
->tag
;
5247 qc
->flags
|= ATA_QCFLAG_ACTIVE
;
5248 ap
->qc_active
|= 1 << qc
->tag
;
5250 if (ata_should_dma_map(qc
)) {
5251 if (qc
->flags
& ATA_QCFLAG_SG
) {
5252 if (ata_sg_setup(qc
))
5254 } else if (qc
->flags
& ATA_QCFLAG_SINGLE
) {
5255 if (ata_sg_setup_one(qc
))
5259 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
5262 ap
->ops
->qc_prep(qc
);
5264 qc
->err_mask
|= ap
->ops
->qc_issue(qc
);
5265 if (unlikely(qc
->err_mask
))
5270 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
5271 qc
->err_mask
|= AC_ERR_SYSTEM
;
5273 ata_qc_complete(qc
);
5277 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5278 * @qc: command to issue to device
5280 * Using various libata functions and hooks, this function
5281 * starts an ATA command. ATA commands are grouped into
5282 * classes called "protocols", and issuing each type of protocol
5283 * is slightly different.
5285 * May be used as the qc_issue() entry in ata_port_operations.
5288 * spin_lock_irqsave(host lock)
5291 * Zero on success, AC_ERR_* mask on failure
5294 unsigned int ata_qc_issue_prot(struct ata_queued_cmd
*qc
)
5296 struct ata_port
*ap
= qc
->ap
;
5298 /* Use polling pio if the LLD doesn't handle
5299 * interrupt driven pio and atapi CDB interrupt.
5301 if (ap
->flags
& ATA_FLAG_PIO_POLLING
) {
5302 switch (qc
->tf
.protocol
) {
5304 case ATA_PROT_NODATA
:
5305 case ATA_PROT_ATAPI
:
5306 case ATA_PROT_ATAPI_NODATA
:
5307 qc
->tf
.flags
|= ATA_TFLAG_POLLING
;
5309 case ATA_PROT_ATAPI_DMA
:
5310 if (qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
)
5311 /* see ata_dma_blacklisted() */
5319 /* Some controllers show flaky interrupt behavior after
5320 * setting xfer mode. Use polling instead.
5322 if (unlikely(qc
->tf
.command
== ATA_CMD_SET_FEATURES
&&
5323 qc
->tf
.feature
== SETFEATURES_XFER
) &&
5324 (ap
->flags
& ATA_FLAG_SETXFER_POLLING
))
5325 qc
->tf
.flags
|= ATA_TFLAG_POLLING
;
5327 /* select the device */
5328 ata_dev_select(ap
, qc
->dev
->devno
, 1, 0);
5330 /* start the command */
5331 switch (qc
->tf
.protocol
) {
5332 case ATA_PROT_NODATA
:
5333 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
5334 ata_qc_set_polling(qc
);
5336 ata_tf_to_host(ap
, &qc
->tf
);
5337 ap
->hsm_task_state
= HSM_ST_LAST
;
5339 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
5340 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
5345 WARN_ON(qc
->tf
.flags
& ATA_TFLAG_POLLING
);
5347 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
5348 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
5349 ap
->ops
->bmdma_start(qc
); /* initiate bmdma */
5350 ap
->hsm_task_state
= HSM_ST_LAST
;
5354 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
5355 ata_qc_set_polling(qc
);
5357 ata_tf_to_host(ap
, &qc
->tf
);
5359 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
) {
5360 /* PIO data out protocol */
5361 ap
->hsm_task_state
= HSM_ST_FIRST
;
5362 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
5364 /* always send first data block using
5365 * the ata_pio_task() codepath.
5368 /* PIO data in protocol */
5369 ap
->hsm_task_state
= HSM_ST
;
5371 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
5372 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
5374 /* if polling, ata_pio_task() handles the rest.
5375 * otherwise, interrupt handler takes over from here.
5381 case ATA_PROT_ATAPI
:
5382 case ATA_PROT_ATAPI_NODATA
:
5383 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
5384 ata_qc_set_polling(qc
);
5386 ata_tf_to_host(ap
, &qc
->tf
);
5388 ap
->hsm_task_state
= HSM_ST_FIRST
;
5390 /* send cdb by polling if no cdb interrupt */
5391 if ((!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
)) ||
5392 (qc
->tf
.flags
& ATA_TFLAG_POLLING
))
5393 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
5396 case ATA_PROT_ATAPI_DMA
:
5397 WARN_ON(qc
->tf
.flags
& ATA_TFLAG_POLLING
);
5399 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
5400 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
5401 ap
->hsm_task_state
= HSM_ST_FIRST
;
5403 /* send cdb by polling if no cdb interrupt */
5404 if (!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
5405 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
5410 return AC_ERR_SYSTEM
;
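
/* ata_qc_issue_prot() is meant to be plugged into an LLD's
 * ata_port_operations as the qc_issue hook.  Hypothetical, non-compiled
 * fragment of such an ops table (only the relevant members shown):
 */
#if 0
static const struct ata_port_operations example_port_ops = {
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	/* remaining mandatory hooks omitted in this sketch */
};
#endif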
5417 * ata_host_intr - Handle host interrupt for given (port, task)
5418 * @ap: Port on which interrupt arrived (possibly...)
5419 * @qc: Taskfile currently active in engine
5421 * Handle host interrupt for given queued command. Currently,
5422 * only DMA interrupts are handled. All other commands are
5423 * handled via polling with interrupts disabled (nIEN bit).
5426 * spin_lock_irqsave(host lock)
5429 * One if interrupt was handled, zero if not (shared irq).
5432 inline unsigned int ata_host_intr (struct ata_port
*ap
,
5433 struct ata_queued_cmd
*qc
)
5435 struct ata_eh_info
*ehi
= &ap
->eh_info
;
5436 u8 status
, host_stat
= 0;
5438 VPRINTK("ata%u: protocol %d task_state %d\n",
5439 ap
->print_id
, qc
->tf
.protocol
, ap
->hsm_task_state
);
5441 /* Check whether we are expecting interrupt in this state */
5442 switch (ap
->hsm_task_state
) {
5444 /* Some pre-ATAPI-4 devices assert INTRQ
5445 * at this state when ready to receive CDB.
5448 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5449 * The flag was turned on only for atapi devices.
5450 * No need to check is_atapi_taskfile(&qc->tf) again.
5452 if (!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
5456 if (qc
->tf
.protocol
== ATA_PROT_DMA
||
5457 qc
->tf
.protocol
== ATA_PROT_ATAPI_DMA
) {
5458 /* check status of DMA engine */
5459 host_stat
= ap
->ops
->bmdma_status(ap
);
5460 VPRINTK("ata%u: host_stat 0x%X\n",
5461 ap
->print_id
, host_stat
);
5463 /* if it's not our irq... */
5464 if (!(host_stat
& ATA_DMA_INTR
))
5467 /* before we do anything else, clear DMA-Start bit */
5468 ap
->ops
->bmdma_stop(qc
);
5470 if (unlikely(host_stat
& ATA_DMA_ERR
)) {
/* error when transferring data to/from memory */
5472 qc
->err_mask
|= AC_ERR_HOST_BUS
;
5473 ap
->hsm_task_state
= HSM_ST_ERR
;
5483 /* check altstatus */
5484 status
= ata_altstatus(ap
);
5485 if (status
& ATA_BUSY
)
5488 /* check main status, clearing INTRQ */
5489 status
= ata_chk_status(ap
);
5490 if (unlikely(status
& ATA_BUSY
))
5493 /* ack bmdma irq events */
5494 ap
->ops
->irq_clear(ap
);
5496 ata_hsm_move(ap
, qc
, status
, 0);
5498 if (unlikely(qc
->err_mask
) && (qc
->tf
.protocol
== ATA_PROT_DMA
||
5499 qc
->tf
.protocol
== ATA_PROT_ATAPI_DMA
))
5500 ata_ehi_push_desc(ehi
, "BMDMA stat 0x%x", host_stat
);
5502 return 1; /* irq handled */
5505 ap
->stats
.idle_irq
++;
5508 if ((ap
->stats
.idle_irq
% 1000) == 0) {
5509 ap
->ops
->irq_ack(ap
, 0); /* debug trap */
5510 ata_port_printk(ap
, KERN_WARNING
, "irq trap\n");
5514 return 0; /* irq not handled */
5518 * ata_interrupt - Default ATA host interrupt handler
5519 * @irq: irq line (unused)
5520 * @dev_instance: pointer to our ata_host information structure
5522 * Default interrupt handler for PCI IDE devices. Calls
5523 * ata_host_intr() for each port that is not disabled.
5526 * Obtains host lock during operation.
5529 * IRQ_NONE or IRQ_HANDLED.
5532 irqreturn_t
ata_interrupt (int irq
, void *dev_instance
)
5534 struct ata_host
*host
= dev_instance
;
5536 unsigned int handled
= 0;
5537 unsigned long flags
;
5539 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5540 spin_lock_irqsave(&host
->lock
, flags
);
5542 for (i
= 0; i
< host
->n_ports
; i
++) {
5543 struct ata_port
*ap
;
5545 ap
= host
->ports
[i
];
5547 !(ap
->flags
& ATA_FLAG_DISABLED
)) {
5548 struct ata_queued_cmd
*qc
;
5550 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
5551 if (qc
&& (!(qc
->tf
.flags
& ATA_TFLAG_POLLING
)) &&
5552 (qc
->flags
& ATA_QCFLAG_ACTIVE
))
5553 handled
|= ata_host_intr(ap
, qc
);
5557 spin_unlock_irqrestore(&host
->lock
, flags
);
5559 return IRQ_RETVAL(handled
);
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@ap: ATA port to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @ap.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}

	return -EOPNOTSUPP;
}
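
/* Example of the usual consumer pattern for sata_scr_read(): fetch
 * SStatus and test the DET field, as ata_port_online() below does.
 * DET == 0x3 means a device is present and PHY communication is
 * established.  Non-compiled sketch.
 */
#if 0
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		/* link is up */;
#endif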
5605 * sata_scr_write - write SCR register of the specified port
5606 * @ap: ATA port to write SCR for
5607 * @reg: SCR to write
5608 * @val: value to write
5610 * Write @val to SCR register @reg of @ap. This function is
5611 * guaranteed to succeed if the cable type of the port is SATA
5612 * and the port implements ->scr_read.
5618 * 0 on success, negative errno on failure.
5620 int sata_scr_write(struct ata_port
*ap
, int reg
, u32 val
)
5622 if (sata_scr_valid(ap
)) {
5623 ap
->ops
->scr_write(ap
, reg
, val
);
5630 * sata_scr_write_flush - write SCR register of the specified port and flush
5631 * @ap: ATA port to write SCR for
5632 * @reg: SCR to write
5633 * @val: value to write
5635 * This function is identical to sata_scr_write() except that this
5636 * function performs flush after writing to the register.
5642 * 0 on success, negative errno on failure.
5644 int sata_scr_write_flush(struct ata_port
*ap
, int reg
, u32 val
)
5646 if (sata_scr_valid(ap
)) {
5647 ap
->ops
->scr_write(ap
, reg
, val
);
5648 ap
->ops
->scr_read(ap
, reg
);
5655 * ata_port_online - test whether the given port is online
5656 * @ap: ATA port to test
5658 * Test whether @ap is online. Note that this function returns 0
5659 * if online status of @ap cannot be obtained, so
5660 * ata_port_online(ap) != !ata_port_offline(ap).
5666 * 1 if the port online status is available and online.
5668 int ata_port_online(struct ata_port
*ap
)
5672 if (!sata_scr_read(ap
, SCR_STATUS
, &sstatus
) && (sstatus
& 0xf) == 0x3)
5678 * ata_port_offline - test whether the given port is offline
5679 * @ap: ATA port to test
5681 * Test whether @ap is offline. Note that this function returns
5682 * 0 if offline status of @ap cannot be obtained, so
5683 * ata_port_online(ap) != !ata_port_offline(ap).
5689 * 1 if the port offline status is available and offline.
5691 int ata_port_offline(struct ata_port
*ap
)
5695 if (!sata_scr_read(ap
, SCR_STATUS
, &sstatus
) && (sstatus
& 0xf) != 0x3)
5700 int ata_flush_cache(struct ata_device
*dev
)
5702 unsigned int err_mask
;
5705 if (!ata_try_flush_cache(dev
))
5708 if (dev
->flags
& ATA_DFLAG_FLUSH_EXT
)
5709 cmd
= ATA_CMD_FLUSH_EXT
;
5711 cmd
= ATA_CMD_FLUSH
;
5713 err_mask
= ata_do_simple_cmd(dev
, cmd
);
5715 ata_dev_printk(dev
, KERN_ERR
, "failed to flush cache\n");
5723 static int ata_host_request_pm(struct ata_host
*host
, pm_message_t mesg
,
5724 unsigned int action
, unsigned int ehi_flags
,
5727 unsigned long flags
;
5730 for (i
= 0; i
< host
->n_ports
; i
++) {
5731 struct ata_port
*ap
= host
->ports
[i
];
5733 /* Previous resume operation might still be in
5734 * progress. Wait for PM_PENDING to clear.
5736 if (ap
->pflags
& ATA_PFLAG_PM_PENDING
) {
5737 ata_port_wait_eh(ap
);
5738 WARN_ON(ap
->pflags
& ATA_PFLAG_PM_PENDING
);
5741 /* request PM ops to EH */
5742 spin_lock_irqsave(ap
->lock
, flags
);
5747 ap
->pm_result
= &rc
;
5750 ap
->pflags
|= ATA_PFLAG_PM_PENDING
;
5751 ap
->eh_info
.action
|= action
;
5752 ap
->eh_info
.flags
|= ehi_flags
;
5754 ata_port_schedule_eh(ap
);
5756 spin_unlock_irqrestore(ap
->lock
, flags
);
5758 /* wait and check result */
5760 ata_port_wait_eh(ap
);
5761 WARN_ON(ap
->pflags
& ATA_PFLAG_PM_PENDING
);
5771 * ata_host_suspend - suspend host
5772 * @host: host to suspend
5775 * Suspend @host. Actual operation is performed by EH. This
5776 * function requests EH to perform PM operations and waits for EH
5780 * Kernel thread context (may sleep).
5783 * 0 on success, -errno on failure.
5785 int ata_host_suspend(struct ata_host
*host
, pm_message_t mesg
)
5789 rc
= ata_host_request_pm(host
, mesg
, 0, ATA_EHI_QUIET
, 1);
5793 /* EH is quiescent now. Fail if we have any ready device.
5794 * This happens if hotplug occurs between completion of device
5795 * suspension and here.
5797 for (i
= 0; i
< host
->n_ports
; i
++) {
5798 struct ata_port
*ap
= host
->ports
[i
];
5800 for (j
= 0; j
< ATA_MAX_DEVICES
; j
++) {
5801 struct ata_device
*dev
= &ap
->device
[j
];
5803 if (ata_dev_ready(dev
)) {
5804 ata_port_printk(ap
, KERN_WARNING
,
5805 "suspend failed, device %d "
5806 "still active\n", dev
->devno
);
5813 host
->dev
->power
.power_state
= mesg
;
5817 ata_host_resume(host
);
5822 * ata_host_resume - resume host
5823 * @host: host to resume
5825 * Resume @host. Actual operation is performed by EH. This
5826 * function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
5830 * Kernel thread context (may sleep).
5832 void ata_host_resume(struct ata_host
*host
)
5834 ata_host_request_pm(host
, PMSG_ON
, ATA_EH_SOFTRESET
,
5835 ATA_EHI_NO_AUTOPSY
| ATA_EHI_QUIET
, 0);
5836 host
->dev
->power
.power_state
= PMSG_ON
;
5841 * ata_port_start - Set port up for dma.
5842 * @ap: Port to initialize
5844 * Called just after data structures for each port are
5845 * initialized. Allocates space for PRD table.
5847 * May be used as the port_start() entry in ata_port_operations.
5850 * Inherited from caller.
5852 int ata_port_start(struct ata_port
*ap
)
5854 struct device
*dev
= ap
->dev
;
5857 ap
->prd
= dmam_alloc_coherent(dev
, ATA_PRD_TBL_SZ
, &ap
->prd_dma
,
5862 rc
= ata_pad_alloc(ap
, dev
);
5866 DPRINTK("prd alloc, virt %p, dma %llx\n", ap
->prd
,
5867 (unsigned long long)ap
->prd_dma
);
5872 * ata_dev_init - Initialize an ata_device structure
5873 * @dev: Device structure to initialize
5875 * Initialize @dev in preparation for probing.
5878 * Inherited from caller.
5880 void ata_dev_init(struct ata_device
*dev
)
5882 struct ata_port
*ap
= dev
->ap
;
5883 unsigned long flags
;
5885 /* SATA spd limit is bound to the first device */
5886 ap
->sata_spd_limit
= ap
->hw_sata_spd_limit
;
5888 /* High bits of dev->flags are used to record warm plug
5889 * requests which occur asynchronously. Synchronize using
5892 spin_lock_irqsave(ap
->lock
, flags
);
5893 dev
->flags
&= ~ATA_DFLAG_INIT_MASK
;
5894 spin_unlock_irqrestore(ap
->lock
, flags
);
5896 memset((void *)dev
+ ATA_DEVICE_CLEAR_OFFSET
, 0,
5897 sizeof(*dev
) - ATA_DEVICE_CLEAR_OFFSET
);
5898 dev
->pio_mask
= UINT_MAX
;
5899 dev
->mwdma_mask
= UINT_MAX
;
5900 dev
->udma_mask
= UINT_MAX
;
5904 * ata_port_alloc - allocate and initialize basic ATA port resources
5905 * @host: ATA host this allocated port belongs to
5907 * Allocate and initialize basic ATA port resources.
5910 * Allocate ATA port on success, NULL on failure.
5913 * Inherited from calling layer (may sleep).
5915 struct ata_port
*ata_port_alloc(struct ata_host
*host
)
5917 struct ata_port
*ap
;
5922 ap
= kzalloc(sizeof(*ap
), GFP_KERNEL
);
5926 ap
->lock
= &host
->lock
;
5927 ap
->flags
= ATA_FLAG_DISABLED
;
5929 ap
->ctl
= ATA_DEVCTL_OBS
;
5931 ap
->dev
= host
->dev
;
5933 ap
->hw_sata_spd_limit
= UINT_MAX
;
5934 ap
->active_tag
= ATA_TAG_POISON
;
5935 ap
->last_ctl
= 0xFF;
5937 #if defined(ATA_VERBOSE_DEBUG)
5938 /* turn on all debugging levels */
5939 ap
->msg_enable
= 0x00FF;
5940 #elif defined(ATA_DEBUG)
5941 ap
->msg_enable
= ATA_MSG_DRV
| ATA_MSG_INFO
| ATA_MSG_CTL
| ATA_MSG_WARN
| ATA_MSG_ERR
;
5943 ap
->msg_enable
= ATA_MSG_DRV
| ATA_MSG_ERR
| ATA_MSG_WARN
;
5946 INIT_DELAYED_WORK(&ap
->port_task
, NULL
);
5947 INIT_DELAYED_WORK(&ap
->hotplug_task
, ata_scsi_hotplug
);
5948 INIT_WORK(&ap
->scsi_rescan_task
, ata_scsi_dev_rescan
);
5949 INIT_LIST_HEAD(&ap
->eh_done_q
);
5950 init_waitqueue_head(&ap
->eh_wait_q
);
5952 ap
->cbl
= ATA_CBL_NONE
;
5954 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
5955 struct ata_device
*dev
= &ap
->device
[i
];
5962 ap
->stats
.unhandled_irq
= 1;
5963 ap
->stats
.idle_irq
= 1;
5968 static void ata_host_release(struct device
*gendev
, void *res
)
5970 struct ata_host
*host
= dev_get_drvdata(gendev
);
5973 for (i
= 0; i
< host
->n_ports
; i
++) {
5974 struct ata_port
*ap
= host
->ports
[i
];
5979 if ((host
->flags
& ATA_HOST_STARTED
) && ap
->ops
->port_stop
)
5980 ap
->ops
->port_stop(ap
);
5983 if ((host
->flags
& ATA_HOST_STARTED
) && host
->ops
->host_stop
)
5984 host
->ops
->host_stop(host
);
5986 for (i
= 0; i
< host
->n_ports
; i
++) {
5987 struct ata_port
*ap
= host
->ports
[i
];
5993 scsi_host_put(ap
->scsi_host
);
5996 host
->ports
[i
] = NULL
;
5999 dev_set_drvdata(gendev
, NULL
);
6003 * ata_host_alloc - allocate and init basic ATA host resources
6004 * @dev: generic device this host is associated with
6005 * @max_ports: maximum number of ATA ports associated with this host
6007 * Allocate and initialize basic ATA host resources. LLD calls
6008 * this function to allocate a host, initializes it fully and
6009 * attaches it using ata_host_register().
6011 * @max_ports ports are allocated and host->n_ports is
6012 * initialized to @max_ports. The caller is allowed to decrease
6013 * host->n_ports before calling ata_host_register(). The unused
6014 * ports will be automatically freed on registration.
6017 * Allocate ATA host on success, NULL on failure.
6020 * Inherited from calling layer (may sleep).
6022 struct ata_host
*ata_host_alloc(struct device
*dev
, int max_ports
)
6024 struct ata_host
*host
;
6030 if (!devres_open_group(dev
, NULL
, GFP_KERNEL
))
6033 /* alloc a container for our list of ATA ports (buses) */
6034 sz
= sizeof(struct ata_host
) + (max_ports
+ 1) * sizeof(void *);
6036 host
= devres_alloc(ata_host_release
, sz
, GFP_KERNEL
);
6040 devres_add(dev
, host
);
6041 dev_set_drvdata(dev
, host
);
6043 spin_lock_init(&host
->lock
);
6045 host
->n_ports
= max_ports
;
6047 /* allocate ports bound to this host */
6048 for (i
= 0; i
< max_ports
; i
++) {
6049 struct ata_port
*ap
;
6051 ap
= ata_port_alloc(host
);
6056 host
->ports
[i
] = ap
;
6059 devres_remove_group(dev
, NULL
);
6063 devres_release_group(dev
, NULL
);
6068 * ata_host_alloc_pinfo - alloc host and init with port_info array
6069 * @dev: generic device this host is associated with
6070 * @ppi: array of ATA port_info to initialize host with
6071 * @n_ports: number of ATA ports attached to this host
6073 * Allocate ATA host and initialize with info from @ppi. If NULL
6074 * terminated, @ppi may contain fewer entries than @n_ports. The
6075 * last entry will be used for the remaining ports.
6078 * Allocate ATA host on success, NULL on failure.
6081 * Inherited from calling layer (may sleep).
6083 struct ata_host
*ata_host_alloc_pinfo(struct device
*dev
,
6084 const struct ata_port_info
* const * ppi
,
6087 const struct ata_port_info
*pi
;
6088 struct ata_host
*host
;
6091 host
= ata_host_alloc(dev
, n_ports
);
6095 for (i
= 0, j
= 0, pi
= NULL
; i
< host
->n_ports
; i
++) {
6096 struct ata_port
*ap
= host
->ports
[i
];
6101 ap
->pio_mask
= pi
->pio_mask
;
6102 ap
->mwdma_mask
= pi
->mwdma_mask
;
6103 ap
->udma_mask
= pi
->udma_mask
;
6104 ap
->flags
|= pi
->flags
;
6105 ap
->ops
= pi
->port_ops
;
6107 if (!host
->ops
&& (pi
->port_ops
!= &ata_dummy_port_ops
))
6108 host
->ops
= pi
->port_ops
;
6109 if (!host
->private_data
&& pi
->private_data
)
6110 host
->private_data
= pi
->private_data
;
6117 * ata_host_start - start and freeze ports of an ATA host
6118 * @host: ATA host to start ports for
6120 * Start and then freeze ports of @host. Started status is
6121 * recorded in host->flags, so this function can be called
6122 * multiple times. Ports are guaranteed to get started only
6123 * once. If host->ops isn't initialized yet, its set to the
6124 * first non-dummy port ops.
6127 * Inherited from calling layer (may sleep).
6130 * 0 if all ports are started successfully, -errno otherwise.
6132 int ata_host_start(struct ata_host
*host
)
6136 if (host
->flags
& ATA_HOST_STARTED
)
6139 for (i
= 0; i
< host
->n_ports
; i
++) {
6140 struct ata_port
*ap
= host
->ports
[i
];
6142 if (!host
->ops
&& !ata_port_is_dummy(ap
))
6143 host
->ops
= ap
->ops
;
6145 if (ap
->ops
->port_start
) {
6146 rc
= ap
->ops
->port_start(ap
);
6148 ata_port_printk(ap
, KERN_ERR
, "failed to "
6149 "start port (errno=%d)\n", rc
);
6154 ata_eh_freeze_port(ap
);
6157 host
->flags
|= ATA_HOST_STARTED
;
6162 struct ata_port
*ap
= host
->ports
[i
];
6164 if (ap
->ops
->port_stop
)
6165 ap
->ops
->port_stop(ap
);
6171 * ata_sas_host_init - Initialize a host struct
6172 * @host: host to initialize
6173 * @dev: device host is attached to
6174 * @flags: host flags
6178 * PCI/etc. bus probe sem.
6181 /* KILLME - the only user left is ipr */
6182 void ata_host_init(struct ata_host
*host
, struct device
*dev
,
6183 unsigned long flags
, const struct ata_port_operations
*ops
)
6185 spin_lock_init(&host
->lock
);
6187 host
->flags
= flags
;
6192 * ata_host_register - register initialized ATA host
6193 * @host: ATA host to register
6194 * @sht: template for SCSI host
6196 * Register initialized ATA host. @host is allocated using
6197 * ata_host_alloc() and fully initialized by LLD. This function
6198 * starts ports, registers @host with ATA and SCSI layers and
6199 * probe registered devices.
6202 * Inherited from calling layer (may sleep).
6205 * 0 on success, -errno otherwise.
6207 int ata_host_register(struct ata_host
*host
, struct scsi_host_template
*sht
)
6211 /* host must have been started */
6212 if (!(host
->flags
& ATA_HOST_STARTED
)) {
6213 dev_printk(KERN_ERR
, host
->dev
,
6214 "BUG: trying to register unstarted host\n");
6219 /* Blow away unused ports. This happens when LLD can't
6220 * determine the exact number of ports to allocate at
6223 for (i
= host
->n_ports
; host
->ports
[i
]; i
++)
6224 kfree(host
->ports
[i
]);
6226 /* give ports names and add SCSI hosts */
6227 for (i
= 0; i
< host
->n_ports
; i
++)
6228 host
->ports
[i
]->print_id
= ata_print_id
++;
6230 rc
= ata_scsi_add_hosts(host
, sht
);
6234 /* set cable, sata_spd_limit and report */
6235 for (i
= 0; i
< host
->n_ports
; i
++) {
6236 struct ata_port
*ap
= host
->ports
[i
];
6239 unsigned long xfer_mask
;
6241 /* set SATA cable type if still unset */
6242 if (ap
->cbl
== ATA_CBL_NONE
&& (ap
->flags
& ATA_FLAG_SATA
))
6243 ap
->cbl
= ATA_CBL_SATA
;
6245 /* init sata_spd_limit to the current value */
6246 if (sata_scr_read(ap
, SCR_CONTROL
, &scontrol
) == 0) {
6247 int spd
= (scontrol
>> 4) & 0xf;
6248 ap
->hw_sata_spd_limit
&= (1 << spd
) - 1;
6250 ap
->sata_spd_limit
= ap
->hw_sata_spd_limit
;
6252 /* report the secondary IRQ for second channel legacy */
6253 irq_line
= host
->irq
;
6254 if (i
== 1 && host
->irq2
)
6255 irq_line
= host
->irq2
;
6257 xfer_mask
= ata_pack_xfermask(ap
->pio_mask
, ap
->mwdma_mask
,
6260 /* print per-port info to dmesg */
6261 if (!ata_port_is_dummy(ap
))
6262 ata_port_printk(ap
, KERN_INFO
, "%cATA max %s cmd 0x%p "
6263 "ctl 0x%p bmdma 0x%p irq %d\n",
6264 ap
->cbl
== ATA_CBL_SATA
? 'S' : 'P',
6265 ata_mode_string(xfer_mask
),
6266 ap
->ioaddr
.cmd_addr
,
6267 ap
->ioaddr
.ctl_addr
,
6268 ap
->ioaddr
.bmdma_addr
,
6271 ata_port_printk(ap
, KERN_INFO
, "DUMMY\n");
6274 /* perform each probe synchronously */
6275 DPRINTK("probe begin\n");
6276 for (i
= 0; i
< host
->n_ports
; i
++) {
6277 struct ata_port
*ap
= host
->ports
[i
];
6281 if (ap
->ops
->error_handler
) {
6282 struct ata_eh_info
*ehi
= &ap
->eh_info
;
6283 unsigned long flags
;
6287 /* kick EH for boot probing */
6288 spin_lock_irqsave(ap
->lock
, flags
);
6290 ehi
->probe_mask
= (1 << ATA_MAX_DEVICES
) - 1;
6291 ehi
->action
|= ATA_EH_SOFTRESET
;
6292 ehi
->flags
|= ATA_EHI_NO_AUTOPSY
| ATA_EHI_QUIET
;
6294 ap
->pflags
|= ATA_PFLAG_LOADING
;
6295 ata_port_schedule_eh(ap
);
6297 spin_unlock_irqrestore(ap
->lock
, flags
);
6299 /* wait for EH to finish */
6300 ata_port_wait_eh(ap
);
6302 DPRINTK("ata%u: bus probe begin\n", ap
->print_id
);
6303 rc
= ata_bus_probe(ap
);
6304 DPRINTK("ata%u: bus probe end\n", ap
->print_id
);
6307 /* FIXME: do something useful here?
6308 * Current libata behavior will
6309 * tear down everything when
6310 * the module is removed
6311 * or the h/w is unplugged.
6317 /* probes are done, now scan each port's disk(s) */
6318 DPRINTK("host probe begin\n");
6319 for (i
= 0; i
< host
->n_ports
; i
++) {
6320 struct ata_port
*ap
= host
->ports
[i
];
6322 ata_scsi_scan_host(ap
);
6329 * ata_host_activate - start host, request IRQ and register it
6330 * @host: target ATA host
6331 * @irq: IRQ to request
6332 * @irq_handler: irq_handler used when requesting IRQ
6333 * @irq_flags: irq_flags used when requesting IRQ
6334 * @sht: scsi_host_template to use when registering the host
6336 * After allocating an ATA host and initializing it, most libata
6337 * LLDs perform three steps to activate the host - start host,
6338 * request IRQ and register it. This helper takes necessasry
6339 * arguments and performs the three steps in one go.
6342 * Inherited from calling layer (may sleep).
6345 * 0 on success, -errno otherwise.
6347 int ata_host_activate(struct ata_host
*host
, int irq
,
6348 irq_handler_t irq_handler
, unsigned long irq_flags
,
6349 struct scsi_host_template
*sht
)
6353 rc
= ata_host_start(host
);
6357 rc
= devm_request_irq(host
->dev
, irq
, irq_handler
, irq_flags
,
6358 dev_driver_string(host
->dev
), host
);
6362 rc
= ata_host_register(host
, sht
);
6363 /* if failed, just free the IRQ and leave ports alone */
6365 devm_free_irq(host
->dev
, irq
, host
);
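
/* ata_host_activate() bundles the usual LLD bring-up sequence: start the
 * host, request the IRQ and register with the SCSI layer.  A hypothetical
 * PCI LLD probe path might look like the non-compiled sketch below (the
 * port_info array and scsi_host_template are assumptions, not libata code).
 */
#if 0
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* LLD-specific iomap/setup of host->ports[i]->ioaddr goes here */

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, &example_sht);
#endif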
 *	ata_port_detach - Detach ATA port in preparation for device removal
6372 * @ap: ATA port to be detached
6374 * Detach all ATA devices and the associated SCSI devices of @ap;
6375 * then, remove the associated SCSI host. @ap is guaranteed to
6376 * be quiescent on return from this function.
6379 * Kernel thread context (may sleep).
6381 void ata_port_detach(struct ata_port
*ap
)
6383 unsigned long flags
;
6386 if (!ap
->ops
->error_handler
)
6389 /* tell EH we're leaving & flush EH */
6390 spin_lock_irqsave(ap
->lock
, flags
);
6391 ap
->pflags
|= ATA_PFLAG_UNLOADING
;
6392 spin_unlock_irqrestore(ap
->lock
, flags
);
6394 ata_port_wait_eh(ap
);
6396 /* EH is now guaranteed to see UNLOADING, so no new device
6397 * will be attached. Disable all existing devices.
6399 spin_lock_irqsave(ap
->lock
, flags
);
6401 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++)
6402 ata_dev_disable(&ap
->device
[i
]);
6404 spin_unlock_irqrestore(ap
->lock
, flags
);
6406 /* Final freeze & EH. All in-flight commands are aborted. EH
6407 * will be skipped and retrials will be terminated with bad
6410 spin_lock_irqsave(ap
->lock
, flags
);
6411 ata_port_freeze(ap
); /* won't be thawed */
6412 spin_unlock_irqrestore(ap
->lock
, flags
);
6414 ata_port_wait_eh(ap
);
6416 /* Flush hotplug task. The sequence is similar to
6417 * ata_port_flush_task().
6419 flush_workqueue(ata_aux_wq
);
6420 cancel_delayed_work(&ap
->hotplug_task
);
6421 flush_workqueue(ata_aux_wq
);
6424 /* remove the associated SCSI host */
6425 scsi_remove_host(ap
->scsi_host
);
6429 * ata_host_detach - Detach all ports of an ATA host
6430 * @host: Host to detach
6432 * Detach all ports of @host.
6435 * Kernel thread context (may sleep).
6437 void ata_host_detach(struct ata_host
*host
)
6441 for (i
= 0; i
< host
->n_ports
; i
++)
6442 ata_port_detach(host
->ports
[i
]);
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
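
/* Typical caller: an LLD points cmd_addr/ctl_addr at its iomapped task
 * file registers and lets ata_std_ports() derive the rest.  The iomap
 * variables below are hypothetical; non-compiled sketch.
 */
#if 0
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = cmd_base;		/* hypothetical iomapped base */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = ctl_base;		/* hypothetical */
	ata_std_ports(ioaddr);
#endif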
6475 * ata_pci_remove_one - PCI layer callback for device removal
6476 * @pdev: PCI device that was removed
6478 * PCI layer indicates to libata via this hook that hot-unplug or
6479 * module unload event has occurred. Detach all ports. Resource
6480 * release is handled via devres.
6483 * Inherited from PCI layer (may sleep).
6485 void ata_pci_remove_one(struct pci_dev
*pdev
)
6487 struct device
*dev
= pci_dev_to_dev(pdev
);
6488 struct ata_host
*host
= dev_get_drvdata(dev
);
6490 ata_host_detach(host
);
6493 /* move to PCI subsystem */
6494 int pci_test_config_bits(struct pci_dev
*pdev
, const struct pci_bits
*bits
)
6496 unsigned long tmp
= 0;
6498 switch (bits
->width
) {
6501 pci_read_config_byte(pdev
, bits
->reg
, &tmp8
);
6507 pci_read_config_word(pdev
, bits
->reg
, &tmp16
);
6513 pci_read_config_dword(pdev
, bits
->reg
, &tmp32
);
6524 return (tmp
== bits
->val
) ? 1 : 0;
6528 void ata_pci_device_do_suspend(struct pci_dev
*pdev
, pm_message_t mesg
)
6530 pci_save_state(pdev
);
6531 pci_disable_device(pdev
);
6533 if (mesg
.event
== PM_EVENT_SUSPEND
)
6534 pci_set_power_state(pdev
, PCI_D3hot
);
6537 int ata_pci_device_do_resume(struct pci_dev
*pdev
)
6541 pci_set_power_state(pdev
, PCI_D0
);
6542 pci_restore_state(pdev
);
6544 rc
= pcim_enable_device(pdev
);
6546 dev_printk(KERN_ERR
, &pdev
->dev
,
6547 "failed to enable device after resume (%d)\n", rc
);
6551 pci_set_master(pdev
);
6555 int ata_pci_device_suspend(struct pci_dev
*pdev
, pm_message_t mesg
)
6557 struct ata_host
*host
= dev_get_drvdata(&pdev
->dev
);
6560 rc
= ata_host_suspend(host
, mesg
);
6564 ata_pci_device_do_suspend(pdev
, mesg
);
6569 int ata_pci_device_resume(struct pci_dev
*pdev
)
6571 struct ata_host
*host
= dev_get_drvdata(&pdev
->dev
);
6574 rc
= ata_pci_device_do_resume(pdev
);
6576 ata_host_resume(host
);
6579 #endif /* CONFIG_PM */
6581 #endif /* CONFIG_PCI */
6584 static int __init
ata_init(void)
6586 ata_probe_timeout
*= HZ
;
6587 ata_wq
= create_workqueue("ata");
6591 ata_aux_wq
= create_singlethread_workqueue("ata_aux");
6593 destroy_workqueue(ata_wq
);
6597 printk(KERN_DEBUG
"libata version " DRV_VERSION
" loaded.\n");
6601 static void __exit
ata_exit(void)
6603 destroy_workqueue(ata_wq
);
6604 destroy_workqueue(ata_aux_wq
);
6607 subsys_initcall(ata_init
);
6608 module_exit(ata_exit
);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
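
/* ata_ratelimit() allows roughly one message per HZ/5 window; callers
 * gate noisy diagnostics on its return value.  Non-compiled sketch of
 * the usual pattern:
 */
#if 0
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"something noisy happened\n");
#endif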
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
6680 static void ata_dummy_noret(struct ata_port
*ap
) { }
6681 static int ata_dummy_ret0(struct ata_port
*ap
) { return 0; }
6682 static void ata_dummy_qc_noret(struct ata_queued_cmd
*qc
) { }
6684 static u8
ata_dummy_check_status(struct ata_port
*ap
)
6689 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd
*qc
)
6691 return AC_ERR_SYSTEM
;
6694 const struct ata_port_operations ata_dummy_port_ops
= {
6695 .port_disable
= ata_port_disable
,
6696 .check_status
= ata_dummy_check_status
,
6697 .check_altstatus
= ata_dummy_check_status
,
6698 .dev_select
= ata_noop_dev_select
,
6699 .qc_prep
= ata_noop_qc_prep
,
6700 .qc_issue
= ata_dummy_qc_issue
,
6701 .freeze
= ata_dummy_noret
,
6702 .thaw
= ata_dummy_noret
,
6703 .error_handler
= ata_dummy_noret
,
6704 .post_internal_cmd
= ata_dummy_qc_noret
,
6705 .irq_clear
= ata_dummy_noret
,
6706 .port_start
= ata_dummy_ret0
,
6707 .port_stop
= ata_dummy_noret
,
6710 const struct ata_port_info ata_dummy_port_info
= {
6711 .port_ops
= &ata_dummy_port_ops
,
6715 * libata is essentially a library of internal helper functions for
6716 * low-level ATA host controller drivers. As such, the API/ABI is
6717 * likely to change as new drivers are added and updated.
6718 * Do not depend on ABI/API stability.
6721 EXPORT_SYMBOL_GPL(sata_deb_timing_normal
);
6722 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug
);
6723 EXPORT_SYMBOL_GPL(sata_deb_timing_long
);
6724 EXPORT_SYMBOL_GPL(ata_dummy_port_ops
);
6725 EXPORT_SYMBOL_GPL(ata_dummy_port_info
);
6726 EXPORT_SYMBOL_GPL(ata_std_bios_param
);
6727 EXPORT_SYMBOL_GPL(ata_std_ports
);
6728 EXPORT_SYMBOL_GPL(ata_host_init
);
6729 EXPORT_SYMBOL_GPL(ata_host_alloc
);
6730 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo
);
6731 EXPORT_SYMBOL_GPL(ata_host_start
);
6732 EXPORT_SYMBOL_GPL(ata_host_register
);
6733 EXPORT_SYMBOL_GPL(ata_host_activate
);
6734 EXPORT_SYMBOL_GPL(ata_host_detach
);
6735 EXPORT_SYMBOL_GPL(ata_sg_init
);
6736 EXPORT_SYMBOL_GPL(ata_sg_init_one
);
6737 EXPORT_SYMBOL_GPL(ata_hsm_move
);
6738 EXPORT_SYMBOL_GPL(ata_qc_complete
);
6739 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple
);
6740 EXPORT_SYMBOL_GPL(ata_qc_issue_prot
);
6741 EXPORT_SYMBOL_GPL(ata_tf_load
);
6742 EXPORT_SYMBOL_GPL(ata_tf_read
);
6743 EXPORT_SYMBOL_GPL(ata_noop_dev_select
);
6744 EXPORT_SYMBOL_GPL(ata_std_dev_select
);
6745 EXPORT_SYMBOL_GPL(sata_print_link_status
);
6746 EXPORT_SYMBOL_GPL(ata_tf_to_fis
);
6747 EXPORT_SYMBOL_GPL(ata_tf_from_fis
);
6748 EXPORT_SYMBOL_GPL(ata_check_status
);
6749 EXPORT_SYMBOL_GPL(ata_altstatus
);
6750 EXPORT_SYMBOL_GPL(ata_exec_command
);
6751 EXPORT_SYMBOL_GPL(ata_port_start
);
6752 EXPORT_SYMBOL_GPL(ata_interrupt
);
6753 EXPORT_SYMBOL_GPL(ata_do_set_mode
);
6754 EXPORT_SYMBOL_GPL(ata_data_xfer
);
6755 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq
);
6756 EXPORT_SYMBOL_GPL(ata_qc_prep
);
6757 EXPORT_SYMBOL_GPL(ata_noop_qc_prep
);
6758 EXPORT_SYMBOL_GPL(ata_bmdma_setup
);
6759 EXPORT_SYMBOL_GPL(ata_bmdma_start
);
6760 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear
);
6761 EXPORT_SYMBOL_GPL(ata_bmdma_status
);
6762 EXPORT_SYMBOL_GPL(ata_bmdma_stop
);
6763 EXPORT_SYMBOL_GPL(ata_bmdma_freeze
);
6764 EXPORT_SYMBOL_GPL(ata_bmdma_thaw
);
6765 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh
);
6766 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler
);
6767 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd
);
6768 EXPORT_SYMBOL_GPL(ata_port_probe
);
6769 EXPORT_SYMBOL_GPL(ata_dev_disable
);
6770 EXPORT_SYMBOL_GPL(sata_set_spd
);
6771 EXPORT_SYMBOL_GPL(sata_phy_debounce
);
6772 EXPORT_SYMBOL_GPL(sata_phy_resume
);
6773 EXPORT_SYMBOL_GPL(sata_phy_reset
);
6774 EXPORT_SYMBOL_GPL(__sata_phy_reset
);
6775 EXPORT_SYMBOL_GPL(ata_bus_reset
);
6776 EXPORT_SYMBOL_GPL(ata_std_prereset
);
6777 EXPORT_SYMBOL_GPL(ata_std_softreset
);
6778 EXPORT_SYMBOL_GPL(sata_port_hardreset
);
6779 EXPORT_SYMBOL_GPL(sata_std_hardreset
);
6780 EXPORT_SYMBOL_GPL(ata_std_postreset
);
6781 EXPORT_SYMBOL_GPL(ata_dev_classify
);
6782 EXPORT_SYMBOL_GPL(ata_dev_pair
);
6783 EXPORT_SYMBOL_GPL(ata_port_disable
);
6784 EXPORT_SYMBOL_GPL(ata_ratelimit
);
6785 EXPORT_SYMBOL_GPL(ata_wait_register
);
6786 EXPORT_SYMBOL_GPL(ata_busy_sleep
);
6787 EXPORT_SYMBOL_GPL(ata_port_queue_task
);
6788 EXPORT_SYMBOL_GPL(ata_scsi_ioctl
);
6789 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd
);
6790 EXPORT_SYMBOL_GPL(ata_scsi_slave_config
);
6791 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy
);
6792 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth
);
6793 EXPORT_SYMBOL_GPL(ata_host_intr
);
6794 EXPORT_SYMBOL_GPL(sata_scr_valid
);
6795 EXPORT_SYMBOL_GPL(sata_scr_read
);
6796 EXPORT_SYMBOL_GPL(sata_scr_write
);
6797 EXPORT_SYMBOL_GPL(sata_scr_write_flush
);
6798 EXPORT_SYMBOL_GPL(ata_port_online
);
6799 EXPORT_SYMBOL_GPL(ata_port_offline
);
6801 EXPORT_SYMBOL_GPL(ata_host_suspend
);
6802 EXPORT_SYMBOL_GPL(ata_host_resume
);
6803 #endif /* CONFIG_PM */
6804 EXPORT_SYMBOL_GPL(ata_id_string
);
6805 EXPORT_SYMBOL_GPL(ata_id_c_string
);
6806 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode
);
6807 EXPORT_SYMBOL_GPL(ata_device_blacklisted
);
6808 EXPORT_SYMBOL_GPL(ata_scsi_simulate
);
6810 EXPORT_SYMBOL_GPL(ata_pio_need_iordy
);
6811 EXPORT_SYMBOL_GPL(ata_timing_compute
);
6812 EXPORT_SYMBOL_GPL(ata_timing_merge
);
6815 EXPORT_SYMBOL_GPL(pci_test_config_bits
);
6816 EXPORT_SYMBOL_GPL(ata_pci_init_native_host
);
6817 EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host
);
6818 EXPORT_SYMBOL_GPL(ata_pci_init_one
);
6819 EXPORT_SYMBOL_GPL(ata_pci_remove_one
);
6821 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend
);
6822 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume
);
6823 EXPORT_SYMBOL_GPL(ata_pci_device_suspend
);
6824 EXPORT_SYMBOL_GPL(ata_pci_device_resume
);
6825 #endif /* CONFIG_PM */
6826 EXPORT_SYMBOL_GPL(ata_pci_default_filter
);
6827 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex
);
6828 #endif /* CONFIG_PCI */
6831 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend
);
6832 EXPORT_SYMBOL_GPL(ata_scsi_device_resume
);
6833 #endif /* CONFIG_PM */
6835 EXPORT_SYMBOL_GPL(ata_eng_timeout
);
6836 EXPORT_SYMBOL_GPL(ata_port_schedule_eh
);
6837 EXPORT_SYMBOL_GPL(ata_port_abort
);
6838 EXPORT_SYMBOL_GPL(ata_port_freeze
);
6839 EXPORT_SYMBOL_GPL(ata_eh_freeze_port
);
6840 EXPORT_SYMBOL_GPL(ata_eh_thaw_port
);
6841 EXPORT_SYMBOL_GPL(ata_eh_qc_complete
);
6842 EXPORT_SYMBOL_GPL(ata_eh_qc_retry
);
6843 EXPORT_SYMBOL_GPL(ata_do_eh
);
6844 EXPORT_SYMBOL_GPL(ata_irq_on
);
6845 EXPORT_SYMBOL_GPL(ata_dummy_irq_on
);
6846 EXPORT_SYMBOL_GPL(ata_irq_ack
);
6847 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack
);
6848 EXPORT_SYMBOL_GPL(ata_dev_try_classify
);
6850 EXPORT_SYMBOL_GPL(ata_cable_40wire
);
6851 EXPORT_SYMBOL_GPL(ata_cable_80wire
);
6852 EXPORT_SYMBOL_GPL(ata_cable_unknown
);
6853 EXPORT_SYMBOL_GPL(ata_cable_sata
);