ata_piix: Invalid use of writel/readl with iomap
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
2dcb407e 52#include <linux/io.h>
1da177e4 53#include <scsi/scsi.h>
193515d5 54#include <scsi/scsi_cmnd.h>
1da177e4
LT
55#include <scsi/scsi_host.h>
56#include <linux/libata.h>
1da177e4
LT
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
71static unsigned int ata_dev_set_feature(struct ata_device *dev,
72 u8 enable, u8 feature);
3373efd8 73static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 74static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 75
f3187195 76unsigned int ata_print_id = 1;
1da177e4
LT
77static struct workqueue_struct *ata_wq;
78
453b07ac
TH
79struct workqueue_struct *ata_aux_wq;
80
418dc1f5 81int atapi_enabled = 1;
1623c81e
JG
82module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84
95de719a
AL
85int atapi_dmadir = 0;
86module_param(atapi_dmadir, int, 0444);
87MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88
baf4fdfa
ML
89int atapi_passthru16 = 1;
90module_param(atapi_passthru16, int, 0444);
91MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92
c3c013a2
JG
93int libata_fua = 0;
94module_param_named(fua, libata_fua, int, 0444);
95MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96
2dcb407e 97static int ata_ignore_hpa;
1e999736
AC
98module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
99MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
100
b3a70601
AC
101static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102module_param_named(dma, libata_dma_mask, int, 0444);
103MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104
a8601e5f
AM
105static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106module_param(ata_probe_timeout, int, 0444);
107MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108
6ebe9d86 109int libata_noacpi = 0;
d7d0dad6 110module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 111MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 112
1da177e4
LT
113MODULE_AUTHOR("Jeff Garzik");
114MODULE_DESCRIPTION("Library module for ATA devices");
115MODULE_LICENSE("GPL");
116MODULE_VERSION(DRV_VERSION);
117
0baab86b 118
1da177e4
LT
119/**
120 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121 * @tf: Taskfile to convert
1da177e4 122 * @pmp: Port multiplier port
9977126c
TH
123 * @is_cmd: This FIS is for command
124 * @fis: Buffer into which data will output
1da177e4
LT
125 *
126 * Converts a standard ATA taskfile to a Serial ATA
127 * FIS structure (Register - Host to Device).
128 *
129 * LOCKING:
130 * Inherited from caller.
131 */
9977126c 132void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 133{
9977126c
TH
134 fis[0] = 0x27; /* Register - Host to Device FIS */
135 fis[1] = pmp & 0xf; /* Port multiplier number*/
136 if (is_cmd)
137 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
138
1da177e4
LT
139 fis[2] = tf->command;
140 fis[3] = tf->feature;
141
142 fis[4] = tf->lbal;
143 fis[5] = tf->lbam;
144 fis[6] = tf->lbah;
145 fis[7] = tf->device;
146
147 fis[8] = tf->hob_lbal;
148 fis[9] = tf->hob_lbam;
149 fis[10] = tf->hob_lbah;
150 fis[11] = tf->hob_feature;
151
152 fis[12] = tf->nsect;
153 fis[13] = tf->hob_nsect;
154 fis[14] = 0;
155 fis[15] = tf->ctl;
156
157 fis[16] = 0;
158 fis[17] = 0;
159 fis[18] = 0;
160 fis[19] = 0;
161}
162
163/**
164 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165 * @fis: Buffer from which data will be input
166 * @tf: Taskfile to output
167 *
e12a1be6 168 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
169 *
170 * LOCKING:
171 * Inherited from caller.
172 */
173
057ace5e 174void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
175{
176 tf->command = fis[2]; /* status */
177 tf->feature = fis[3]; /* error */
178
179 tf->lbal = fis[4];
180 tf->lbam = fis[5];
181 tf->lbah = fis[6];
182 tf->device = fis[7];
183
184 tf->hob_lbal = fis[8];
185 tf->hob_lbam = fis[9];
186 tf->hob_lbah = fis[10];
187
188 tf->nsect = fis[12];
189 tf->hob_nsect = fis[13];
190}
191
8cbd6df1
AL
192static const u8 ata_rw_cmds[] = {
193 /* pio multi */
194 ATA_CMD_READ_MULTI,
195 ATA_CMD_WRITE_MULTI,
196 ATA_CMD_READ_MULTI_EXT,
197 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
198 0,
199 0,
200 0,
201 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
202 /* pio */
203 ATA_CMD_PIO_READ,
204 ATA_CMD_PIO_WRITE,
205 ATA_CMD_PIO_READ_EXT,
206 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
207 0,
208 0,
209 0,
210 0,
8cbd6df1
AL
211 /* dma */
212 ATA_CMD_READ,
213 ATA_CMD_WRITE,
214 ATA_CMD_READ_EXT,
9a3dccc4
TH
215 ATA_CMD_WRITE_EXT,
216 0,
217 0,
218 0,
219 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 220};
1da177e4
LT
221
222/**
8cbd6df1 223 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
224 * @tf: command to examine and configure
225 * @dev: device tf belongs to
1da177e4 226 *
2e9edbf8 227 * Examine the device configuration and tf->flags to calculate
8cbd6df1 228 * the proper read/write commands and protocol to use.
1da177e4
LT
229 *
230 * LOCKING:
231 * caller.
232 */
bd056d7e 233static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 234{
9a3dccc4 235 u8 cmd;
1da177e4 236
9a3dccc4 237 int index, fua, lba48, write;
2e9edbf8 238
9a3dccc4 239 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
240 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 242
8cbd6df1
AL
243 if (dev->flags & ATA_DFLAG_PIO) {
244 tf->protocol = ATA_PROT_PIO;
9a3dccc4 245 index = dev->multi_count ? 0 : 8;
9af5c9c9 246 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
247 /* Unable to use DMA due to host limitation */
248 tf->protocol = ATA_PROT_PIO;
0565c26d 249 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
250 } else {
251 tf->protocol = ATA_PROT_DMA;
9a3dccc4 252 index = 16;
8cbd6df1 253 }
1da177e4 254
9a3dccc4
TH
255 cmd = ata_rw_cmds[index + fua + lba48 + write];
256 if (cmd) {
257 tf->command = cmd;
258 return 0;
259 }
260 return -1;
1da177e4
LT
261}
262
35b649fe
TH
263/**
264 * ata_tf_read_block - Read block address from ATA taskfile
265 * @tf: ATA taskfile of interest
266 * @dev: ATA device @tf belongs to
267 *
268 * LOCKING:
269 * None.
270 *
271 * Read block address from @tf. This function can handle all
272 * three address formats - LBA, LBA48 and CHS. tf->protocol and
273 * flags select the address format to use.
274 *
275 * RETURNS:
276 * Block address read from @tf.
277 */
278u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
279{
280 u64 block = 0;
281
282 if (tf->flags & ATA_TFLAG_LBA) {
283 if (tf->flags & ATA_TFLAG_LBA48) {
284 block |= (u64)tf->hob_lbah << 40;
285 block |= (u64)tf->hob_lbam << 32;
286 block |= tf->hob_lbal << 24;
287 } else
288 block |= (tf->device & 0xf) << 24;
289
290 block |= tf->lbah << 16;
291 block |= tf->lbam << 8;
292 block |= tf->lbal;
293 } else {
294 u32 cyl, head, sect;
295
296 cyl = tf->lbam | (tf->lbah << 8);
297 head = tf->device & 0xf;
298 sect = tf->lbal;
299
300 block = (cyl * dev->heads + head) * dev->sectors + sect;
301 }
302
303 return block;
304}
305
bd056d7e
TH
306/**
307 * ata_build_rw_tf - Build ATA taskfile for given read/write request
308 * @tf: Target ATA taskfile
309 * @dev: ATA device @tf belongs to
310 * @block: Block address
311 * @n_block: Number of blocks
312 * @tf_flags: RW/FUA etc...
313 * @tag: tag
314 *
315 * LOCKING:
316 * None.
317 *
318 * Build ATA taskfile @tf for read/write request described by
319 * @block, @n_block, @tf_flags and @tag on @dev.
320 *
321 * RETURNS:
322 *
323 * 0 on success, -ERANGE if the request is too large for @dev,
324 * -EINVAL if the request is invalid.
325 */
326int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327 u64 block, u32 n_block, unsigned int tf_flags,
328 unsigned int tag)
329{
330 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331 tf->flags |= tf_flags;
332
6d1245bf 333 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
334 /* yay, NCQ */
335 if (!lba_48_ok(block, n_block))
336 return -ERANGE;
337
338 tf->protocol = ATA_PROT_NCQ;
339 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340
341 if (tf->flags & ATA_TFLAG_WRITE)
342 tf->command = ATA_CMD_FPDMA_WRITE;
343 else
344 tf->command = ATA_CMD_FPDMA_READ;
345
346 tf->nsect = tag << 3;
347 tf->hob_feature = (n_block >> 8) & 0xff;
348 tf->feature = n_block & 0xff;
349
350 tf->hob_lbah = (block >> 40) & 0xff;
351 tf->hob_lbam = (block >> 32) & 0xff;
352 tf->hob_lbal = (block >> 24) & 0xff;
353 tf->lbah = (block >> 16) & 0xff;
354 tf->lbam = (block >> 8) & 0xff;
355 tf->lbal = block & 0xff;
356
357 tf->device = 1 << 6;
358 if (tf->flags & ATA_TFLAG_FUA)
359 tf->device |= 1 << 7;
360 } else if (dev->flags & ATA_DFLAG_LBA) {
361 tf->flags |= ATA_TFLAG_LBA;
362
363 if (lba_28_ok(block, n_block)) {
364 /* use LBA28 */
365 tf->device |= (block >> 24) & 0xf;
366 } else if (lba_48_ok(block, n_block)) {
367 if (!(dev->flags & ATA_DFLAG_LBA48))
368 return -ERANGE;
369
370 /* use LBA48 */
371 tf->flags |= ATA_TFLAG_LBA48;
372
373 tf->hob_nsect = (n_block >> 8) & 0xff;
374
375 tf->hob_lbah = (block >> 40) & 0xff;
376 tf->hob_lbam = (block >> 32) & 0xff;
377 tf->hob_lbal = (block >> 24) & 0xff;
378 } else
379 /* request too large even for LBA48 */
380 return -ERANGE;
381
382 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383 return -EINVAL;
384
385 tf->nsect = n_block & 0xff;
386
387 tf->lbah = (block >> 16) & 0xff;
388 tf->lbam = (block >> 8) & 0xff;
389 tf->lbal = block & 0xff;
390
391 tf->device |= ATA_LBA;
392 } else {
393 /* CHS */
394 u32 sect, head, cyl, track;
395
396 /* The request -may- be too large for CHS addressing. */
397 if (!lba_28_ok(block, n_block))
398 return -ERANGE;
399
400 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401 return -EINVAL;
402
403 /* Convert LBA to CHS */
404 track = (u32)block / dev->sectors;
405 cyl = track / dev->heads;
406 head = track % dev->heads;
407 sect = (u32)block % dev->sectors + 1;
408
409 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410 (u32)block, track, cyl, head, sect);
411
412 /* Check whether the converted CHS can fit.
413 Cylinder: 0-65535
414 Head: 0-15
415 Sector: 1-255*/
416 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417 return -ERANGE;
418
419 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420 tf->lbal = sect;
421 tf->lbam = cyl;
422 tf->lbah = cyl >> 8;
423 tf->device |= head;
424 }
425
426 return 0;
427}
428
cb95d562
TH
429/**
430 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431 * @pio_mask: pio_mask
432 * @mwdma_mask: mwdma_mask
433 * @udma_mask: udma_mask
434 *
435 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436 * unsigned int xfer_mask.
437 *
438 * LOCKING:
439 * None.
440 *
441 * RETURNS:
442 * Packed xfer_mask.
443 */
444static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445 unsigned int mwdma_mask,
446 unsigned int udma_mask)
447{
448 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451}
452
c0489e4e
TH
453/**
454 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455 * @xfer_mask: xfer_mask to unpack
456 * @pio_mask: resulting pio_mask
457 * @mwdma_mask: resulting mwdma_mask
458 * @udma_mask: resulting udma_mask
459 *
460 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461 * Any NULL distination masks will be ignored.
462 */
463static void ata_unpack_xfermask(unsigned int xfer_mask,
464 unsigned int *pio_mask,
465 unsigned int *mwdma_mask,
466 unsigned int *udma_mask)
467{
468 if (pio_mask)
469 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470 if (mwdma_mask)
471 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472 if (udma_mask)
473 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474}
475
cb95d562 476static const struct ata_xfer_ent {
be9a50c8 477 int shift, bits;
cb95d562
TH
478 u8 base;
479} ata_xfer_tbl[] = {
480 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483 { -1, },
484};
485
486/**
487 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488 * @xfer_mask: xfer_mask of interest
489 *
490 * Return matching XFER_* value for @xfer_mask. Only the highest
491 * bit of @xfer_mask is considered.
492 *
493 * LOCKING:
494 * None.
495 *
496 * RETURNS:
497 * Matching XFER_* value, 0 if no match found.
498 */
499static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500{
501 int highbit = fls(xfer_mask) - 1;
502 const struct ata_xfer_ent *ent;
503
504 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506 return ent->base + highbit - ent->shift;
507 return 0;
508}
509
510/**
511 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512 * @xfer_mode: XFER_* of interest
513 *
514 * Return matching xfer_mask for @xfer_mode.
515 *
516 * LOCKING:
517 * None.
518 *
519 * RETURNS:
520 * Matching xfer_mask, 0 if no match found.
521 */
522static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523{
524 const struct ata_xfer_ent *ent;
525
526 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528 return 1 << (ent->shift + xfer_mode - ent->base);
529 return 0;
530}
531
532/**
533 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534 * @xfer_mode: XFER_* of interest
535 *
536 * Return matching xfer_shift for @xfer_mode.
537 *
538 * LOCKING:
539 * None.
540 *
541 * RETURNS:
542 * Matching xfer_shift, -1 if no match found.
543 */
544static int ata_xfer_mode2shift(unsigned int xfer_mode)
545{
546 const struct ata_xfer_ent *ent;
547
548 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550 return ent->shift;
551 return -1;
552}
553
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* Indexed by bit position in an xfer_mask. */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];

	return "<n/a>";
}
599
4c360c81
TH
/* Human-readable name for a SATA link speed value (1-based). */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
611
3373efd8 612void ata_dev_disable(struct ata_device *dev)
0b8efb0a 613{
09d7f9b0 614 if (ata_dev_enabled(dev)) {
9af5c9c9 615 if (ata_msg_drv(dev->link->ap))
09d7f9b0 616 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
617 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
618 ATA_DNXFER_QUIET);
0b8efb0a
TH
619 dev->class++;
620 }
621}
622
ca77329f
KCA
623static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
624{
625 struct ata_link *link = dev->link;
626 struct ata_port *ap = link->ap;
627 u32 scontrol;
628 unsigned int err_mask;
629 int rc;
630
631 /*
632 * disallow DIPM for drivers which haven't set
633 * ATA_FLAG_IPM. This is because when DIPM is enabled,
634 * phy ready will be set in the interrupt status on
635 * state changes, which will cause some drivers to
636 * think there are errors - additionally drivers will
637 * need to disable hot plug.
638 */
639 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
640 ap->pm_policy = NOT_AVAILABLE;
641 return -EINVAL;
642 }
643
644 /*
645 * For DIPM, we will only enable it for the
646 * min_power setting.
647 *
648 * Why? Because Disks are too stupid to know that
649 * If the host rejects a request to go to SLUMBER
650 * they should retry at PARTIAL, and instead it
651 * just would give up. So, for medium_power to
652 * work at all, we need to only allow HIPM.
653 */
654 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
655 if (rc)
656 return rc;
657
658 switch (policy) {
659 case MIN_POWER:
660 /* no restrictions on IPM transitions */
661 scontrol &= ~(0x3 << 8);
662 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
663 if (rc)
664 return rc;
665
666 /* enable DIPM */
667 if (dev->flags & ATA_DFLAG_DIPM)
668 err_mask = ata_dev_set_feature(dev,
669 SETFEATURES_SATA_ENABLE, SATA_DIPM);
670 break;
671 case MEDIUM_POWER:
672 /* allow IPM to PARTIAL */
673 scontrol &= ~(0x1 << 8);
674 scontrol |= (0x2 << 8);
675 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676 if (rc)
677 return rc;
678
f5456b63
KCA
679 /*
680 * we don't have to disable DIPM since IPM flags
681 * disallow transitions to SLUMBER, which effectively
682 * disable DIPM if it does not support PARTIAL
683 */
ca77329f
KCA
684 break;
685 case NOT_AVAILABLE:
686 case MAX_PERFORMANCE:
687 /* disable all IPM transitions */
688 scontrol |= (0x3 << 8);
689 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
690 if (rc)
691 return rc;
692
f5456b63
KCA
693 /*
694 * we don't have to disable DIPM since IPM flags
695 * disallow all transitions which effectively
696 * disable DIPM anyway.
697 */
ca77329f
KCA
698 break;
699 }
700
701 /* FIXME: handle SET FEATURES failure */
702 (void) err_mask;
703
704 return 0;
705}
706
707/**
708 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
709 * @dev: device to enable power management
710 * @policy: the link power management policy
ca77329f
KCA
711 *
712 * Enable SATA Interface power management. This will enable
713 * Device Interface Power Management (DIPM) for min_power
714 * policy, and then call driver specific callbacks for
715 * enabling Host Initiated Power management.
716 *
717 * Locking: Caller.
718 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
719 */
720void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
721{
722 int rc = 0;
723 struct ata_port *ap = dev->link->ap;
724
725 /* set HIPM first, then DIPM */
726 if (ap->ops->enable_pm)
727 rc = ap->ops->enable_pm(ap, policy);
728 if (rc)
729 goto enable_pm_out;
730 rc = ata_dev_set_dipm(dev, policy);
731
732enable_pm_out:
733 if (rc)
734 ap->pm_policy = MAX_PERFORMANCE;
735 else
736 ap->pm_policy = policy;
737 return /* rc */; /* hopefully we can use 'rc' eventually */
738}
739
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* kill DIPM first, then let the driver tear down HIPM */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
762
763void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
764{
765 ap->pm_policy = policy;
766 ap->link.eh_info.action |= ATA_EHI_LPM;
767 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
768 ata_port_schedule_eh(ap);
769}
770
1992a5ed 771#ifdef CONFIG_PM
ca77329f
KCA
772static void ata_lpm_enable(struct ata_host *host)
773{
774 struct ata_link *link;
775 struct ata_port *ap;
776 struct ata_device *dev;
777 int i;
778
779 for (i = 0; i < host->n_ports; i++) {
780 ap = host->ports[i];
781 ata_port_for_each_link(link, ap) {
782 ata_link_for_each_dev(dev, link)
783 ata_dev_disable_pm(dev);
784 }
785 }
786}
787
788static void ata_lpm_disable(struct ata_host *host)
789{
790 int i;
791
792 for (i = 0; i < host->n_ports; i++) {
793 struct ata_port *ap = host->ports[i];
794 ata_lpm_schedule(ap, ap->pm_policy);
795 }
796}
1992a5ed 797#endif /* CONFIG_PM */
ca77329f
KCA
798
799
1da177e4 800/**
0d5ff566 801 * ata_devchk - PATA device presence detection
1da177e4
LT
802 * @ap: ATA channel to examine
803 * @device: Device to examine (starting at zero)
804 *
805 * This technique was originally described in
806 * Hale Landis's ATADRVR (www.ata-atapi.com), and
807 * later found its way into the ATA/ATAPI spec.
808 *
809 * Write a pattern to the ATA shadow registers,
810 * and if a device is present, it will respond by
811 * correctly storing and echoing back the
812 * ATA shadow register contents.
813 *
814 * LOCKING:
815 * caller.
816 */
817
0d5ff566 818static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
819{
820 struct ata_ioports *ioaddr = &ap->ioaddr;
821 u8 nsect, lbal;
822
823 ap->ops->dev_select(ap, device);
824
0d5ff566
TH
825 iowrite8(0x55, ioaddr->nsect_addr);
826 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 827
0d5ff566
TH
828 iowrite8(0xaa, ioaddr->nsect_addr);
829 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 830
0d5ff566
TH
831 iowrite8(0x55, ioaddr->nsect_addr);
832 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 833
0d5ff566
TH
834 nsect = ioread8(ioaddr->nsect_addr);
835 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
836
837 if ((nsect == 0x55) && (lbal == 0xaa))
838 return 1; /* we found a device */
839
840 return 0; /* nothing found */
841}
842
1da177e4
LT
843/**
844 * ata_dev_classify - determine device type based on ATA-spec signature
845 * @tf: ATA taskfile register set for device to be identified
846 *
847 * Determine from taskfile register contents whether a device is
848 * ATA or ATAPI, as per "Signature and persistence" section
849 * of ATA/PI spec (volume 1, sect 5.14).
850 *
851 * LOCKING:
852 * None.
853 *
854 * RETURNS:
633273a3
TH
855 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
856 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 857 */
057ace5e 858unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
859{
860 /* Apple's open source Darwin code hints that some devices only
861 * put a proper signature into the LBA mid/high registers,
862 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
863 *
864 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
865 * signatures for ATA and ATAPI devices attached on SerialATA,
866 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
867 * spec has never mentioned about using different signatures
868 * for ATA/ATAPI devices. Then, Serial ATA II: Port
869 * Multiplier specification began to use 0x69/0x96 to identify
870 * port multpliers and 0x3c/0xc3 to identify SEMB device.
871 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
872 * 0x69/0x96 shortly and described them as reserved for
873 * SerialATA.
874 *
875 * We follow the current spec and consider that 0x69/0x96
876 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 877 */
633273a3 878 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
879 DPRINTK("found ATA device by sig\n");
880 return ATA_DEV_ATA;
881 }
882
633273a3 883 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
884 DPRINTK("found ATAPI device by sig\n");
885 return ATA_DEV_ATAPI;
886 }
887
633273a3
TH
888 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
889 DPRINTK("found PMP device by sig\n");
890 return ATA_DEV_PMP;
891 }
892
893 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 894 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
895 return ATA_DEV_SEMB_UNSUP; /* not yet */
896 }
897
1da177e4
LT
898 DPRINTK("unknown device\n");
899 return ATA_DEV_UNKNOWN;
900}
901
902/**
903 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
904 * @dev: ATA device to classify (starting at zero)
905 * @present: device seems present
b4dc7623 906 * @r_err: Value of error register on completion
1da177e4
LT
907 *
908 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
909 * an ATA/ATAPI-defined set of values is placed in the ATA
910 * shadow registers, indicating the results of device detection
911 * and diagnostics.
912 *
913 * Select the ATA device, and read the values from the ATA shadow
914 * registers. Then parse according to the Error register value,
915 * and the spec-defined values examined by ata_dev_classify().
916 *
917 * LOCKING:
918 * caller.
b4dc7623
TH
919 *
920 * RETURNS:
921 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 922 */
3f19859e
TH
923unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
924 u8 *r_err)
1da177e4 925{
3f19859e 926 struct ata_port *ap = dev->link->ap;
1da177e4
LT
927 struct ata_taskfile tf;
928 unsigned int class;
929 u8 err;
930
3f19859e 931 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
932
933 memset(&tf, 0, sizeof(tf));
934
1da177e4 935 ap->ops->tf_read(ap, &tf);
0169e284 936 err = tf.feature;
b4dc7623
TH
937 if (r_err)
938 *r_err = err;
1da177e4 939
93590859 940 /* see if device passed diags: if master then continue and warn later */
3f19859e 941 if (err == 0 && dev->devno == 0)
93590859 942 /* diagnostic fail : do nothing _YET_ */
3f19859e 943 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 944 else if (err == 1)
1da177e4 945 /* do nothing */ ;
3f19859e 946 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
947 /* do nothing */ ;
948 else
b4dc7623 949 return ATA_DEV_NONE;
1da177e4 950
b4dc7623 951 /* determine if device is ATA or ATAPI */
1da177e4 952 class = ata_dev_classify(&tf);
b4dc7623 953
d7fbee05
TH
954 if (class == ATA_DEV_UNKNOWN) {
955 /* If the device failed diagnostic, it's likely to
956 * have reported incorrect device signature too.
957 * Assume ATA device if the device seems present but
958 * device signature is invalid with diagnostic
959 * failure.
960 */
961 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
962 class = ATA_DEV_ATA;
963 else
964 class = ATA_DEV_NONE;
965 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
966 class = ATA_DEV_NONE;
967
b4dc7623 968 return class;
1da177e4
LT
969}
970
971/**
6a62a04d 972 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
973 * @id: IDENTIFY DEVICE results we will examine
974 * @s: string into which data is output
975 * @ofs: offset into identify device page
976 * @len: length of string to return. must be an even number.
977 *
978 * The strings in the IDENTIFY DEVICE page are broken up into
979 * 16-bit chunks. Run through the string, and output each
980 * 8-bit chunk linearly, regardless of platform.
981 *
982 * LOCKING:
983 * caller.
984 */
985
6a62a04d
TH
986void ata_id_string(const u16 *id, unsigned char *s,
987 unsigned int ofs, unsigned int len)
1da177e4
LT
988{
989 unsigned int c;
990
991 while (len > 0) {
992 c = id[ofs] >> 8;
993 *s = c;
994 s++;
995
996 c = id[ofs] & 0xff;
997 *s = c;
998 s++;
999
1000 ofs++;
1001 len -= 2;
1002 }
1003}
1004
0e949ff3 1005/**
6a62a04d 1006 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1007 * @id: IDENTIFY DEVICE results we will examine
1008 * @s: string into which data is output
1009 * @ofs: offset into identify device page
1010 * @len: length of string to return. must be an odd number.
1011 *
6a62a04d 1012 * This function is identical to ata_id_string except that it
0e949ff3
TH
1013 * trims trailing spaces and terminates the resulting string with
1014 * null. @len must be actual maximum length (even number) + 1.
1015 *
1016 * LOCKING:
1017 * caller.
1018 */
6a62a04d
TH
1019void ata_id_c_string(const u16 *id, unsigned char *s,
1020 unsigned int ofs, unsigned int len)
0e949ff3
TH
1021{
1022 unsigned char *p;
1023
1024 WARN_ON(!(len & 1));
1025
6a62a04d 1026 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1027
1028 p = s + strnlen(s, len - 1);
1029 while (p > s && p[-1] == ' ')
1030 p--;
1031 *p = '\0';
1032}
0baab86b 1033
db6f8759
TH
1034static u64 ata_id_n_sectors(const u16 *id)
1035{
1036 if (ata_id_has_lba(id)) {
1037 if (ata_id_has_lba48(id))
1038 return ata_id_u64(id, 100);
1039 else
1040 return ata_id_u32(id, 60);
1041 } else {
1042 if (ata_id_current_chs_valid(id))
1043 return ata_id_u32(id, 57);
1044 else
1045 return id[1] * id[3] * id[6];
1046 }
1047}
1048
1e999736
AC
1049static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1050{
1051 u64 sectors = 0;
1052
1053 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1054 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1055 sectors |= (tf->hob_lbal & 0xff) << 24;
1056 sectors |= (tf->lbah & 0xff) << 16;
1057 sectors |= (tf->lbam & 0xff) << 8;
1058 sectors |= (tf->lbal & 0xff);
1059
1060 return ++sectors;
1061}
1062
1063static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1064{
1065 u64 sectors = 0;
1066
1067 sectors |= (tf->device & 0x0f) << 24;
1068 sectors |= (tf->lbah & 0xff) << 16;
1069 sectors |= (tf->lbam & 0xff) << 8;
1070 sectors |= (tf->lbal & 0xff);
1071
1072 return ++sectors;
1073}
1074
1075/**
c728a914
TH
1076 * ata_read_native_max_address - Read native max address
1077 * @dev: target device
1078 * @max_sectors: out parameter for the result native max address
1e999736 1079 *
c728a914
TH
1080 * Perform an LBA48 or LBA28 native size query upon the device in
1081 * question.
1e999736 1082 *
c728a914
TH
1083 * RETURNS:
1084 * 0 on success, -EACCES if command is aborted by the drive.
1085 * -EIO on other errors.
1e999736 1086 */
c728a914 1087static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1088{
c728a914 1089 unsigned int err_mask;
1e999736 1090 struct ata_taskfile tf;
c728a914 1091 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1092
1093 ata_tf_init(dev, &tf);
1094
c728a914 1095 /* always clear all address registers */
1e999736 1096 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1097
c728a914
TH
1098 if (lba48) {
1099 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1100 tf.flags |= ATA_TFLAG_LBA48;
1101 } else
1102 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1103
1e999736 1104 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1105 tf.device |= ATA_LBA;
1106
2b789108 1107 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1108 if (err_mask) {
1109 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1110 "max address (err_mask=0x%x)\n", err_mask);
1111 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1112 return -EACCES;
1113 return -EIO;
1114 }
1e999736 1115
c728a914
TH
1116 if (lba48)
1117 *max_sectors = ata_tf_to_lba48(&tf);
1118 else
1119 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1120 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1121 (*max_sectors)--;
c728a914 1122 return 0;
1e999736
AC
1123}
1124
1125/**
c728a914
TH
1126 * ata_set_max_sectors - Set max sectors
1127 * @dev: target device
6b38d1d1 1128 * @new_sectors: new max sectors value to set for the device
1e999736 1129 *
c728a914
TH
1130 * Set max sectors of @dev to @new_sectors.
1131 *
1132 * RETURNS:
1133 * 0 on success, -EACCES if command is aborted or denied (due to
1134 * previous non-volatile SET_MAX) by the drive. -EIO on other
1135 * errors.
1e999736 1136 */
05027adc 1137static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1138{
c728a914 1139 unsigned int err_mask;
1e999736 1140 struct ata_taskfile tf;
c728a914 1141 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1142
1143 new_sectors--;
1144
1145 ata_tf_init(dev, &tf);
1146
1e999736 1147 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1148
1149 if (lba48) {
1150 tf.command = ATA_CMD_SET_MAX_EXT;
1151 tf.flags |= ATA_TFLAG_LBA48;
1152
1153 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1154 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1155 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1156 } else {
c728a914
TH
1157 tf.command = ATA_CMD_SET_MAX;
1158
1e582ba4
TH
1159 tf.device |= (new_sectors >> 24) & 0xf;
1160 }
1161
1e999736 1162 tf.protocol |= ATA_PROT_NODATA;
c728a914 1163 tf.device |= ATA_LBA;
1e999736
AC
1164
1165 tf.lbal = (new_sectors >> 0) & 0xff;
1166 tf.lbam = (new_sectors >> 8) & 0xff;
1167 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1168
2b789108 1169 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1170 if (err_mask) {
1171 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1172 "max address (err_mask=0x%x)\n", err_mask);
1173 if (err_mask == AC_ERR_DEV &&
1174 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1175 return -EACCES;
1176 return -EIO;
1177 }
1178
c728a914 1179 return 0;
1e999736
AC
1180}
1181
1182/**
1183 * ata_hpa_resize - Resize a device with an HPA set
1184 * @dev: Device to resize
1185 *
1186 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1187 * it if required to the full size of the media. The caller must check
1188 * the drive has the HPA feature set enabled.
05027adc
TH
1189 *
1190 * RETURNS:
1191 * 0 on success, -errno on failure.
1e999736 1192 */
05027adc 1193static int ata_hpa_resize(struct ata_device *dev)
1e999736 1194{
05027adc
TH
1195 struct ata_eh_context *ehc = &dev->link->eh_context;
1196 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1197 u64 sectors = ata_id_n_sectors(dev->id);
1198 u64 native_sectors;
c728a914 1199 int rc;
a617c09f 1200
05027adc
TH
1201 /* do we need to do it? */
1202 if (dev->class != ATA_DEV_ATA ||
1203 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1204 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1205 return 0;
1e999736 1206
05027adc
TH
1207 /* read native max address */
1208 rc = ata_read_native_max_address(dev, &native_sectors);
1209 if (rc) {
1210 /* If HPA isn't going to be unlocked, skip HPA
1211 * resizing from the next try.
1212 */
1213 if (!ata_ignore_hpa) {
1214 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1215 "broken, will skip HPA handling\n");
1216 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1217
1218 /* we can continue if device aborted the command */
1219 if (rc == -EACCES)
1220 rc = 0;
1e999736 1221 }
37301a55 1222
05027adc
TH
1223 return rc;
1224 }
1225
1226 /* nothing to do? */
1227 if (native_sectors <= sectors || !ata_ignore_hpa) {
1228 if (!print_info || native_sectors == sectors)
1229 return 0;
1230
1231 if (native_sectors > sectors)
1232 ata_dev_printk(dev, KERN_INFO,
1233 "HPA detected: current %llu, native %llu\n",
1234 (unsigned long long)sectors,
1235 (unsigned long long)native_sectors);
1236 else if (native_sectors < sectors)
1237 ata_dev_printk(dev, KERN_WARNING,
1238 "native sectors (%llu) is smaller than "
1239 "sectors (%llu)\n",
1240 (unsigned long long)native_sectors,
1241 (unsigned long long)sectors);
1242 return 0;
1243 }
1244
1245 /* let's unlock HPA */
1246 rc = ata_set_max_sectors(dev, native_sectors);
1247 if (rc == -EACCES) {
1248 /* if device aborted the command, skip HPA resizing */
1249 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1250 "(%llu -> %llu), skipping HPA handling\n",
1251 (unsigned long long)sectors,
1252 (unsigned long long)native_sectors);
1253 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1254 return 0;
1255 } else if (rc)
1256 return rc;
1257
1258 /* re-read IDENTIFY data */
1259 rc = ata_dev_reread_id(dev, 0);
1260 if (rc) {
1261 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1262 "data after HPA resizing\n");
1263 return rc;
1264 }
1265
1266 if (print_info) {
1267 u64 new_sectors = ata_id_n_sectors(dev->id);
1268 ata_dev_printk(dev, KERN_INFO,
1269 "HPA unlocked: %llu -> %llu, native %llu\n",
1270 (unsigned long long)sectors,
1271 (unsigned long long)new_sectors,
1272 (unsigned long long)native_sectors);
1273 }
1274
1275 return 0;
1e999736
AC
1276}
1277
10305f0f
A
1278/**
1279 * ata_id_to_dma_mode - Identify DMA mode from id block
1280 * @dev: device to identify
cc261267 1281 * @unknown: mode to assume if we cannot tell
10305f0f
A
1282 *
1283 * Set up the timing values for the device based upon the identify
1284 * reported values for the DMA mode. This function is used by drivers
1285 * which rely upon firmware configured modes, but wish to report the
1286 * mode correctly when possible.
1287 *
1288 * In addition we emit similarly formatted messages to the default
1289 * ata_dev_set_mode handler, in order to provide consistency of
1290 * presentation.
1291 */
1292
1293void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1294{
1295 unsigned int mask;
1296 u8 mode;
1297
1298 /* Pack the DMA modes */
1299 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1300 if (dev->id[53] & 0x04)
1301 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1302
1303 /* Select the mode in use */
1304 mode = ata_xfer_mask2mode(mask);
1305
1306 if (mode != 0) {
1307 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1308 ata_mode_string(mask));
1309 } else {
1310 /* SWDMA perhaps ? */
1311 mode = unknown;
1312 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1313 }
1314
1315 /* Configure the device reporting */
1316 dev->xfer_mode = mode;
1317 dev->xfer_shift = ata_xfer_mode2shift(mode);
1318}
1319
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty -- selection is a no-op on such hardware */
}
1335
0baab86b 1336
1da177e4
LT
1337/**
1338 * ata_std_dev_select - Select device 0/1 on ATA bus
1339 * @ap: ATA channel to manipulate
1340 * @device: ATA device (numbered from zero) to select
1341 *
1342 * Use the method defined in the ATA specification to
1343 * make either device 0, or device 1, active on the
0baab86b
EF
1344 * ATA channel. Works with both PIO and MMIO.
1345 *
1346 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1347 *
1348 * LOCKING:
1349 * caller.
1350 */
1351
2dcb407e 1352void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1353{
1354 u8 tmp;
1355
1356 if (device == 0)
1357 tmp = ATA_DEVICE_OBS;
1358 else
1359 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1360
0d5ff566 1361 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1362 ata_pause(ap); /* needed; also flushes, for mmio */
1363}
1364
1365/**
1366 * ata_dev_select - Select device 0/1 on ATA bus
1367 * @ap: ATA channel to manipulate
1368 * @device: ATA device (numbered from zero) to select
1369 * @wait: non-zero to wait for Status register BSY bit to clear
1370 * @can_sleep: non-zero if context allows sleeping
1371 *
1372 * Use the method defined in the ATA specification to
1373 * make either device 0, or device 1, active on the
1374 * ATA channel.
1375 *
1376 * This is a high-level version of ata_std_dev_select(),
1377 * which additionally provides the services of inserting
1378 * the proper pauses and status polling, where needed.
1379 *
1380 * LOCKING:
1381 * caller.
1382 */
1383
1384void ata_dev_select(struct ata_port *ap, unsigned int device,
1385 unsigned int wait, unsigned int can_sleep)
1386{
88574551 1387 if (ata_msg_probe(ap))
44877b4e
TH
1388 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1389 "device %u, wait %u\n", device, wait);
1da177e4
LT
1390
1391 if (wait)
1392 ata_wait_idle(ap);
1393
1394 ap->ops->dev_select(ap, device);
1395
1396 if (wait) {
9af5c9c9 1397 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1398 msleep(150);
1399 ata_wait_idle(ap);
1400 }
1401}
1402
1403/**
1404 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1405 * @id: IDENTIFY DEVICE page to dump
1da177e4 1406 *
0bd3300a
TH
1407 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1408 * page.
1da177e4
LT
1409 *
1410 * LOCKING:
1411 * caller.
1412 */
1413
0bd3300a 1414static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1415{
1416 DPRINTK("49==0x%04x "
1417 "53==0x%04x "
1418 "63==0x%04x "
1419 "64==0x%04x "
1420 "75==0x%04x \n",
0bd3300a
TH
1421 id[49],
1422 id[53],
1423 id[63],
1424 id[64],
1425 id[75]);
1da177e4
LT
1426 DPRINTK("80==0x%04x "
1427 "81==0x%04x "
1428 "82==0x%04x "
1429 "83==0x%04x "
1430 "84==0x%04x \n",
0bd3300a
TH
1431 id[80],
1432 id[81],
1433 id[82],
1434 id[83],
1435 id[84]);
1da177e4
LT
1436 DPRINTK("88==0x%04x "
1437 "93==0x%04x\n",
0bd3300a
TH
1438 id[88],
1439 id[93]);
1da177e4
LT
1440}
1441
cb95d562
TH
1442/**
1443 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1444 * @id: IDENTIFY data to compute xfer mask from
1445 *
1446 * Compute the xfermask for this device. This is not as trivial
1447 * as it seems if we must consider early devices correctly.
1448 *
1449 * FIXME: pre IDE drive timing (do we care ?).
1450 *
1451 * LOCKING:
1452 * None.
1453 *
1454 * RETURNS:
1455 * Computed xfermask
1456 */
1457static unsigned int ata_id_xfermask(const u16 *id)
1458{
1459 unsigned int pio_mask, mwdma_mask, udma_mask;
1460
1461 /* Usual case. Word 53 indicates word 64 is valid */
1462 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1463 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1464 pio_mask <<= 3;
1465 pio_mask |= 0x7;
1466 } else {
1467 /* If word 64 isn't valid then Word 51 high byte holds
1468 * the PIO timing number for the maximum. Turn it into
1469 * a mask.
1470 */
7a0f1c8a 1471 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1472 if (mode < 5) /* Valid PIO range */
2dcb407e 1473 pio_mask = (2 << mode) - 1;
46767aeb
AC
1474 else
1475 pio_mask = 1;
cb95d562
TH
1476
1477 /* But wait.. there's more. Design your standards by
1478 * committee and you too can get a free iordy field to
1479 * process. However its the speeds not the modes that
1480 * are supported... Note drivers using the timing API
1481 * will get this right anyway
1482 */
1483 }
1484
1485 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1486
b352e57d
AC
1487 if (ata_id_is_cfa(id)) {
1488 /*
1489 * Process compact flash extended modes
1490 */
1491 int pio = id[163] & 0x7;
1492 int dma = (id[163] >> 3) & 7;
1493
1494 if (pio)
1495 pio_mask |= (1 << 5);
1496 if (pio > 1)
1497 pio_mask |= (1 << 6);
1498 if (dma)
1499 mwdma_mask |= (1 << 3);
1500 if (dma > 1)
1501 mwdma_mask |= (1 << 4);
1502 }
1503
fb21f0d0
TH
1504 udma_mask = 0;
1505 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1506 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1507
1508 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1509}
1510
86e45b6b
TH
1511/**
1512 * ata_port_queue_task - Queue port_task
1513 * @ap: The ata_port to queue port_task for
e2a7f77a 1514 * @fn: workqueue function to be scheduled
65f27f38 1515 * @data: data for @fn to use
e2a7f77a 1516 * @delay: delay time for workqueue function
86e45b6b
TH
1517 *
1518 * Schedule @fn(@data) for execution after @delay jiffies using
1519 * port_task. There is one port_task per port and it's the
1520 * user(low level driver)'s responsibility to make sure that only
1521 * one task is active at any given time.
1522 *
1523 * libata core layer takes care of synchronization between
1524 * port_task and EH. ata_port_queue_task() may be ignored for EH
1525 * synchronization.
1526 *
1527 * LOCKING:
1528 * Inherited from caller.
1529 */
65f27f38 1530void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1531 unsigned long delay)
1532{
65f27f38
DH
1533 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1534 ap->port_task_data = data;
86e45b6b 1535
45a66c1c
ON
1536 /* may fail if ata_port_flush_task() in progress */
1537 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1538}
1539
1540/**
1541 * ata_port_flush_task - Flush port_task
1542 * @ap: The ata_port to flush port_task for
1543 *
1544 * After this function completes, port_task is guranteed not to
1545 * be running or scheduled.
1546 *
1547 * LOCKING:
1548 * Kernel thread context (may sleep)
1549 */
1550void ata_port_flush_task(struct ata_port *ap)
1551{
86e45b6b
TH
1552 DPRINTK("ENTER\n");
1553
45a66c1c 1554 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1555
0dd4b21f
BP
1556 if (ata_msg_ctl(ap))
1557 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1558}
1559
7102d230 1560static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1561{
77853bf2 1562 struct completion *waiting = qc->private_data;
a2a7a662 1563
a2a7a662 1564 complete(waiting);
a2a7a662
TH
1565}
1566
1567/**
2432697b 1568 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1569 * @dev: Device to which the command is sent
1570 * @tf: Taskfile registers for the command and the result
d69cf37d 1571 * @cdb: CDB for packet command
a2a7a662 1572 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1573 * @sgl: sg list for the data buffer of the command
2432697b 1574 * @n_elem: Number of sg entries
2b789108 1575 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1576 *
1577 * Executes libata internal command with timeout. @tf contains
1578 * command on entry and result on return. Timeout and error
1579 * conditions are reported via return value. No recovery action
1580 * is taken after a command times out. It's caller's duty to
1581 * clean up after timeout.
1582 *
1583 * LOCKING:
1584 * None. Should be called with kernel context, might sleep.
551e8889
TH
1585 *
1586 * RETURNS:
1587 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1588 */
2432697b
TH
1589unsigned ata_exec_internal_sg(struct ata_device *dev,
1590 struct ata_taskfile *tf, const u8 *cdb,
87260216 1591 int dma_dir, struct scatterlist *sgl,
2b789108 1592 unsigned int n_elem, unsigned long timeout)
a2a7a662 1593{
9af5c9c9
TH
1594 struct ata_link *link = dev->link;
1595 struct ata_port *ap = link->ap;
a2a7a662
TH
1596 u8 command = tf->command;
1597 struct ata_queued_cmd *qc;
2ab7db1f 1598 unsigned int tag, preempted_tag;
dedaf2b0 1599 u32 preempted_sactive, preempted_qc_active;
da917d69 1600 int preempted_nr_active_links;
60be6b9a 1601 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1602 unsigned long flags;
77853bf2 1603 unsigned int err_mask;
d95a717f 1604 int rc;
a2a7a662 1605
ba6a1308 1606 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1607
e3180499 1608 /* no internal command while frozen */
b51e9e5d 1609 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1610 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1611 return AC_ERR_SYSTEM;
1612 }
1613
2ab7db1f 1614 /* initialize internal qc */
a2a7a662 1615
2ab7db1f
TH
1616 /* XXX: Tag 0 is used for drivers with legacy EH as some
1617 * drivers choke if any other tag is given. This breaks
1618 * ata_tag_internal() test for those drivers. Don't use new
1619 * EH stuff without converting to it.
1620 */
1621 if (ap->ops->error_handler)
1622 tag = ATA_TAG_INTERNAL;
1623 else
1624 tag = 0;
1625
6cec4a39 1626 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1627 BUG();
f69499f4 1628 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1629
1630 qc->tag = tag;
1631 qc->scsicmd = NULL;
1632 qc->ap = ap;
1633 qc->dev = dev;
1634 ata_qc_reinit(qc);
1635
9af5c9c9
TH
1636 preempted_tag = link->active_tag;
1637 preempted_sactive = link->sactive;
dedaf2b0 1638 preempted_qc_active = ap->qc_active;
da917d69 1639 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1640 link->active_tag = ATA_TAG_POISON;
1641 link->sactive = 0;
dedaf2b0 1642 ap->qc_active = 0;
da917d69 1643 ap->nr_active_links = 0;
2ab7db1f
TH
1644
1645 /* prepare & issue qc */
a2a7a662 1646 qc->tf = *tf;
d69cf37d
TH
1647 if (cdb)
1648 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1649 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1650 qc->dma_dir = dma_dir;
1651 if (dma_dir != DMA_NONE) {
2432697b 1652 unsigned int i, buflen = 0;
87260216 1653 struct scatterlist *sg;
2432697b 1654
87260216
JA
1655 for_each_sg(sgl, sg, n_elem, i)
1656 buflen += sg->length;
2432697b 1657
87260216 1658 ata_sg_init(qc, sgl, n_elem);
49c80429 1659 qc->nbytes = buflen;
a2a7a662
TH
1660 }
1661
77853bf2 1662 qc->private_data = &wait;
a2a7a662
TH
1663 qc->complete_fn = ata_qc_complete_internal;
1664
8e0e694a 1665 ata_qc_issue(qc);
a2a7a662 1666
ba6a1308 1667 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1668
2b789108
TH
1669 if (!timeout)
1670 timeout = ata_probe_timeout * 1000 / HZ;
1671
1672 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1673
1674 ata_port_flush_task(ap);
41ade50c 1675
d95a717f 1676 if (!rc) {
ba6a1308 1677 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1678
1679 /* We're racing with irq here. If we lose, the
1680 * following test prevents us from completing the qc
d95a717f
TH
1681 * twice. If we win, the port is frozen and will be
1682 * cleaned up by ->post_internal_cmd().
a2a7a662 1683 */
77853bf2 1684 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1685 qc->err_mask |= AC_ERR_TIMEOUT;
1686
1687 if (ap->ops->error_handler)
1688 ata_port_freeze(ap);
1689 else
1690 ata_qc_complete(qc);
f15a1daf 1691
0dd4b21f
BP
1692 if (ata_msg_warn(ap))
1693 ata_dev_printk(dev, KERN_WARNING,
88574551 1694 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1695 }
1696
ba6a1308 1697 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1698 }
1699
d95a717f
TH
1700 /* do post_internal_cmd */
1701 if (ap->ops->post_internal_cmd)
1702 ap->ops->post_internal_cmd(qc);
1703
a51d644a
TH
1704 /* perform minimal error analysis */
1705 if (qc->flags & ATA_QCFLAG_FAILED) {
1706 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1707 qc->err_mask |= AC_ERR_DEV;
1708
1709 if (!qc->err_mask)
1710 qc->err_mask |= AC_ERR_OTHER;
1711
1712 if (qc->err_mask & ~AC_ERR_OTHER)
1713 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1714 }
1715
15869303 1716 /* finish up */
ba6a1308 1717 spin_lock_irqsave(ap->lock, flags);
15869303 1718
e61e0672 1719 *tf = qc->result_tf;
77853bf2
TH
1720 err_mask = qc->err_mask;
1721
1722 ata_qc_free(qc);
9af5c9c9
TH
1723 link->active_tag = preempted_tag;
1724 link->sactive = preempted_sactive;
dedaf2b0 1725 ap->qc_active = preempted_qc_active;
da917d69 1726 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1727
1f7dd3e9
TH
1728 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1729 * Until those drivers are fixed, we detect the condition
1730 * here, fail the command with AC_ERR_SYSTEM and reenable the
1731 * port.
1732 *
1733 * Note that this doesn't change any behavior as internal
1734 * command failure results in disabling the device in the
1735 * higher layer for LLDDs without new reset/EH callbacks.
1736 *
1737 * Kill the following code as soon as those drivers are fixed.
1738 */
198e0fed 1739 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1740 err_mask |= AC_ERR_SYSTEM;
1741 ata_port_probe(ap);
1742 }
1743
ba6a1308 1744 spin_unlock_irqrestore(ap->lock, flags);
15869303 1745
77853bf2 1746 return err_mask;
a2a7a662
TH
1747}
1748
2432697b 1749/**
33480a0e 1750 * ata_exec_internal - execute libata internal command
2432697b
TH
1751 * @dev: Device to which the command is sent
1752 * @tf: Taskfile registers for the command and the result
1753 * @cdb: CDB for packet command
1754 * @dma_dir: Data tranfer direction of the command
1755 * @buf: Data buffer of the command
1756 * @buflen: Length of data buffer
2b789108 1757 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1758 *
1759 * Wrapper around ata_exec_internal_sg() which takes simple
1760 * buffer instead of sg list.
1761 *
1762 * LOCKING:
1763 * None. Should be called with kernel context, might sleep.
1764 *
1765 * RETURNS:
1766 * Zero on success, AC_ERR_* mask on failure
1767 */
1768unsigned ata_exec_internal(struct ata_device *dev,
1769 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1770 int dma_dir, void *buf, unsigned int buflen,
1771 unsigned long timeout)
2432697b 1772{
33480a0e
TH
1773 struct scatterlist *psg = NULL, sg;
1774 unsigned int n_elem = 0;
2432697b 1775
33480a0e
TH
1776 if (dma_dir != DMA_NONE) {
1777 WARN_ON(!buf);
1778 sg_init_one(&sg, buf, buflen);
1779 psg = &sg;
1780 n_elem++;
1781 }
2432697b 1782
2b789108
TH
1783 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1784 timeout);
2432697b
TH
1785}
1786
977e6b9f
TH
1787/**
1788 * ata_do_simple_cmd - execute simple internal command
1789 * @dev: Device to which the command is sent
1790 * @cmd: Opcode to execute
1791 *
1792 * Execute a 'simple' command, that only consists of the opcode
1793 * 'cmd' itself, without filling any other registers
1794 *
1795 * LOCKING:
1796 * Kernel thread context (may sleep).
1797 *
1798 * RETURNS:
1799 * Zero on success, AC_ERR_* mask on failure
e58eb583 1800 */
77b08fb5 1801unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1802{
1803 struct ata_taskfile tf;
e58eb583
TH
1804
1805 ata_tf_init(dev, &tf);
1806
1807 tf.command = cmd;
1808 tf.flags |= ATA_TFLAG_DEVICE;
1809 tf.protocol = ATA_PROT_NODATA;
1810
2b789108 1811 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1812}
1813
1bc4ccff
AC
1814/**
1815 * ata_pio_need_iordy - check if iordy needed
1816 * @adev: ATA device
1817 *
1818 * Check if the current speed of the device requires IORDY. Used
1819 * by various controllers for chip configuration.
1820 */
a617c09f 1821
1bc4ccff
AC
1822unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1823{
432729f0
AC
1824 /* Controller doesn't support IORDY. Probably a pointless check
1825 as the caller should know this */
9af5c9c9 1826 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1827 return 0;
432729f0
AC
1828 /* PIO3 and higher it is mandatory */
1829 if (adev->pio_mode > XFER_PIO_2)
1830 return 1;
1831 /* We turn it on when possible */
1832 if (ata_id_has_iordy(adev->id))
1bc4ccff 1833 return 1;
432729f0
AC
1834 return 0;
1835}
2e9edbf8 1836
432729f0
AC
1837/**
1838 * ata_pio_mask_no_iordy - Return the non IORDY mask
1839 * @adev: ATA device
1840 *
1841 * Compute the highest mode possible if we are not using iordy. Return
1842 * -1 if no iordy mode is available.
1843 */
a617c09f 1844
432729f0
AC
1845static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1846{
1bc4ccff 1847 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1848 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1849 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1850 /* Is the speed faster than the drive allows non IORDY ? */
1851 if (pio) {
1852 /* This is cycle times not frequency - watch the logic! */
1853 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1854 return 3 << ATA_SHIFT_PIO;
1855 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1856 }
1857 }
432729f0 1858 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1859}
1860
1da177e4 1861/**
49016aca 1862 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1863 * @dev: target device
1864 * @p_class: pointer to class of the target device (may be changed)
bff04647 1865 * @flags: ATA_READID_* flags
fe635c7e 1866 * @id: buffer to read IDENTIFY data into
1da177e4 1867 *
49016aca
TH
1868 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1869 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1870 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1871 * for pre-ATA4 drives.
1da177e4 1872 *
50a99018 1873 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1874 * now we abort if we hit that case.
50a99018 1875 *
1da177e4 1876 * LOCKING:
49016aca
TH
1877 * Kernel thread context (may sleep)
1878 *
1879 * RETURNS:
1880 * 0 on success, -errno otherwise.
1da177e4 1881 */
a9beec95 1882int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1883 unsigned int flags, u16 *id)
1da177e4 1884{
9af5c9c9 1885 struct ata_port *ap = dev->link->ap;
49016aca 1886 unsigned int class = *p_class;
a0123703 1887 struct ata_taskfile tf;
49016aca
TH
1888 unsigned int err_mask = 0;
1889 const char *reason;
54936f8b 1890 int may_fallback = 1, tried_spinup = 0;
49016aca 1891 int rc;
1da177e4 1892
0dd4b21f 1893 if (ata_msg_ctl(ap))
44877b4e 1894 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1895
49016aca 1896 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1897 retry:
3373efd8 1898 ata_tf_init(dev, &tf);
a0123703 1899
49016aca
TH
1900 switch (class) {
1901 case ATA_DEV_ATA:
a0123703 1902 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1903 break;
1904 case ATA_DEV_ATAPI:
a0123703 1905 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1906 break;
1907 default:
1908 rc = -ENODEV;
1909 reason = "unsupported class";
1910 goto err_out;
1da177e4
LT
1911 }
1912
a0123703 1913 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1914
1915 /* Some devices choke if TF registers contain garbage. Make
1916 * sure those are properly initialized.
1917 */
1918 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1919
1920 /* Device presence detection is unreliable on some
1921 * controllers. Always poll IDENTIFY if available.
1922 */
1923 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1924
3373efd8 1925 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1926 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1927 if (err_mask) {
800b3996 1928 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1929 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1930 ap->print_id, dev->devno);
55a8e2c8
TH
1931 return -ENOENT;
1932 }
1933
54936f8b
TH
1934 /* Device or controller might have reported the wrong
1935 * device class. Give a shot at the other IDENTIFY if
1936 * the current one is aborted by the device.
1937 */
1938 if (may_fallback &&
1939 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1940 may_fallback = 0;
1941
1942 if (class == ATA_DEV_ATA)
1943 class = ATA_DEV_ATAPI;
1944 else
1945 class = ATA_DEV_ATA;
1946 goto retry;
1947 }
1948
49016aca
TH
1949 rc = -EIO;
1950 reason = "I/O error";
1da177e4
LT
1951 goto err_out;
1952 }
1953
54936f8b
TH
1954 /* Falling back doesn't make sense if ID data was read
1955 * successfully at least once.
1956 */
1957 may_fallback = 0;
1958
49016aca 1959 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1960
49016aca 1961 /* sanity check */
a4f5749b 1962 rc = -EINVAL;
6070068b 1963 reason = "device reports invalid type";
a4f5749b
TH
1964
1965 if (class == ATA_DEV_ATA) {
1966 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1967 goto err_out;
1968 } else {
1969 if (ata_id_is_ata(id))
1970 goto err_out;
49016aca
TH
1971 }
1972
169439c2
ML
1973 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1974 tried_spinup = 1;
1975 /*
1976 * Drive powered-up in standby mode, and requires a specific
1977 * SET_FEATURES spin-up subcommand before it will accept
1978 * anything other than the original IDENTIFY command.
1979 */
218f3d30 1980 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1981 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1982 rc = -EIO;
1983 reason = "SPINUP failed";
1984 goto err_out;
1985 }
1986 /*
1987 * If the drive initially returned incomplete IDENTIFY info,
1988 * we now must reissue the IDENTIFY command.
1989 */
1990 if (id[2] == 0x37c8)
1991 goto retry;
1992 }
1993
bff04647 1994 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1995 /*
1996 * The exact sequence expected by certain pre-ATA4 drives is:
1997 * SRST RESET
50a99018
AC
1998 * IDENTIFY (optional in early ATA)
1999 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2000 * anything else..
2001 * Some drives were very specific about that exact sequence.
50a99018
AC
2002 *
2003 * Note that ATA4 says lba is mandatory so the second check
2004 * shoud never trigger.
49016aca
TH
2005 */
2006 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2007 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2008 if (err_mask) {
2009 rc = -EIO;
2010 reason = "INIT_DEV_PARAMS failed";
2011 goto err_out;
2012 }
2013
2014 /* current CHS translation info (id[53-58]) might be
2015 * changed. reread the identify device info.
2016 */
bff04647 2017 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2018 goto retry;
2019 }
2020 }
2021
2022 *p_class = class;
fe635c7e 2023
49016aca
TH
2024 return 0;
2025
2026 err_out:
88574551 2027 if (ata_msg_warn(ap))
0dd4b21f 2028 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2029 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2030 return rc;
2031}
2032
3373efd8 2033static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2034{
9af5c9c9
TH
2035 struct ata_port *ap = dev->link->ap;
2036 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2037}
2038
a6e6ce8e
TH
2039static void ata_dev_config_ncq(struct ata_device *dev,
2040 char *desc, size_t desc_sz)
2041{
9af5c9c9 2042 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2043 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2044
2045 if (!ata_id_has_ncq(dev->id)) {
2046 desc[0] = '\0';
2047 return;
2048 }
75683fe7 2049 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2050 snprintf(desc, desc_sz, "NCQ (not used)");
2051 return;
2052 }
a6e6ce8e 2053 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2054 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2055 dev->flags |= ATA_DFLAG_NCQ;
2056 }
2057
2058 if (hdepth >= ddepth)
2059 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2060 else
2061 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2062}
2063
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* IDENTIFY word 59 bit 8 set => current multi-sector
		 * count (low byte) is valid */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management capabilities, unless blacklisted */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2340
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port (unused)
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2353
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port (unused)
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2366
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port (unused)
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2378
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port (unused)
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2390
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed retry budget; a device is dropped
	 * when its budget reaches zero */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device that failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2539
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing this flag is all "enabled" means here */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2555
3be680b7
TH
2556/**
2557 * sata_print_link_status - Print SATA link status
936fd732 2558 * @link: SATA link to printk link status about
3be680b7
TH
2559 *
2560 * This function prints link speed and status of a SATA link.
2561 *
2562 * LOCKING:
2563 * None.
2564 */
936fd732 2565void sata_print_link_status(struct ata_link *link)
3be680b7 2566{
6d5f9732 2567 u32 sstatus, scontrol, tmp;
3be680b7 2568
936fd732 2569 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2570 return;
936fd732 2571 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2572
936fd732 2573 if (ata_link_online(link)) {
3be680b7 2574 tmp = (sstatus >> 4) & 0xf;
936fd732 2575 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2576 "SATA link up %s (SStatus %X SControl %X)\n",
2577 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2578 } else {
936fd732 2579 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2580 "SATA link down (SStatus %X SControl %X)\n",
2581 sstatus, scontrol);
3be680b7
TH
2582 }
2583}
2584
ebdfca6e
AC
2585/**
2586 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2587 * @adev: device
2588 *
2589 * Obtain the other device on the same cable, or if none is
2590 * present NULL is returned
2591 */
2e9edbf8 2592
3373efd8 2593struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2594{
9af5c9c9
TH
2595 struct ata_link *link = adev->link;
2596 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2597 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2598 return NULL;
2599 return pair;
2600}
2601
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both devices absent and flag the whole port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2621
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;	/* already limited to the lowest speed */

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2680
936fd732 2681static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2682{
5270222f
TH
2683 struct ata_link *host_link = &link->ap->link;
2684 u32 limit, target, spd;
1c3fae4d 2685
5270222f
TH
2686 limit = link->sata_spd_limit;
2687
2688 /* Don't configure downstream link faster than upstream link.
2689 * It doesn't speed up anything and some PMPs choke on such
2690 * configuration.
2691 */
2692 if (!ata_is_host_link(link) && host_link->sata_spd)
2693 limit &= (1 << host_link->sata_spd) - 1;
2694
2695 if (limit == UINT_MAX)
2696 target = 0;
1c3fae4d 2697 else
5270222f 2698 target = fls(limit);
1c3fae4d
TH
2699
2700 spd = (*scontrol >> 4) & 0xf;
5270222f 2701 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2702
5270222f 2703 return spd != target;
1c3fae4d
TH
2704}
2705
2706/**
3c567b7d 2707 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2708 * @link: Link in question
1c3fae4d
TH
2709 *
2710 * Test whether the spd limit in SControl matches
936fd732 2711 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2712 * whether hardreset is necessary to apply SATA spd
2713 * configuration.
2714 *
2715 * LOCKING:
2716 * Inherited from caller.
2717 *
2718 * RETURNS:
2719 * 1 if SATA spd configuration is needed, 0 otherwise.
2720 */
936fd732 2721int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2722{
2723 u32 scontrol;
2724
936fd732 2725 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2726 return 1;
1c3fae4d 2727
936fd732 2728 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2729}
2730
2731/**
3c567b7d 2732 * sata_set_spd - set SATA spd according to spd limit
936fd732 2733 * @link: Link to set SATA spd for
1c3fae4d 2734 *
936fd732 2735 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2736 *
2737 * LOCKING:
2738 * Inherited from caller.
2739 *
2740 * RETURNS:
2741 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2742 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2743 */
936fd732 2744int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2745{
2746 u32 scontrol;
81952c54 2747 int rc;
1c3fae4d 2748
936fd732 2749 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2750 return rc;
1c3fae4d 2751
936fd732 2752 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2753 return 0;
2754
936fd732 2755 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2756 return rc;
2757
1c3fae4d
TH
2758 return 1;
2759}
2760
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
	/* { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma } */

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	{ 0xFF }	/* sentinel; terminates ata_timing_find_mode() scan */
};
2809
/* round v up to a whole number of 'unit' periods */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* like ENOUGH(), but a zero value ("not specified") stays zero */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert timing @t into clock counts in @q: each nanosecond field is
 * scaled by 1000 and rounded up to units of @T (@UT for the udma field).
 * @q may alias @t. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2824
2825void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2826 struct ata_timing *m, unsigned int what)
2827{
2828 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2829 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2830 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2831 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2832 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2833 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2834 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2835 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2836}
2837
2dcb407e 2838static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
452503f9
AC
2839{
2840 const struct ata_timing *t;
2841
2842 for (t = ata_timing; t->mode != speed; t++)
91190758 2843 if (t->mode == 0xFF)
452503f9 2844 return NULL;
2e9edbf8 2845 return t;
452503f9
AC
2846}
2847
/* Compute the effective timing for @adev at transfer mode @speed into @t:
 * start from the standard table entry, apply any EIDE per-drive cycle
 * overrides, quantize to clock periods @T/@UT, keep DMA no faster than
 * the drive's PIO mode, and stretch active/recovery to fill the cycle.
 * Returns 0 on success, -EINVAL if @speed is unknown. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse for the drive's current PIO mode and merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2918
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag OR'd into the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the fastest DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode, and refuse no-op changes */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3007
/* Issue SET FEATURES - XFER MODE for dev->xfer_mode, forgiving the
 * device-error responses of several classes of known-quirky hardware,
 * then revalidate the device.  Returns 0 on success, -EIO if the
 * device rejected the command, other negative errno from revalidation. */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* re-read IDENTIFY with POST_SETMODE set so EH knows the
	 * configuration change is expected */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
3056
1da177e4 3057/**
04351821 3058 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3059 * @link: link on which timings will be programmed
e82cbdb9 3060 * @r_failed_dev: out paramter for failed device
1da177e4 3061 *
04351821
A
3062 * Standard implementation of the function used to tune and set
3063 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3064 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3065 * returned in @r_failed_dev.
780a87f7 3066 *
1da177e4 3067 * LOCKING:
0cba632b 3068 * PCI/etc. bus probe sem.
e82cbdb9
TH
3069 *
3070 * RETURNS:
3071 * 0 on success, negative errno otherwise
1da177e4 3072 */
04351821 3073
0260731f 3074int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3075{
0260731f 3076 struct ata_port *ap = link->ap;
e8e0619f 3077 struct ata_device *dev;
f58229f8 3078 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3079
a6d5a51c 3080 /* step 1: calculate xfer_mask */
f58229f8 3081 ata_link_for_each_dev(dev, link) {
acf356b1 3082 unsigned int pio_mask, dma_mask;
b3a70601 3083 unsigned int mode_mask;
a6d5a51c 3084
e1211e3f 3085 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3086 continue;
3087
b3a70601
AC
3088 mode_mask = ATA_DMA_MASK_ATA;
3089 if (dev->class == ATA_DEV_ATAPI)
3090 mode_mask = ATA_DMA_MASK_ATAPI;
3091 else if (ata_id_is_cfa(dev->id))
3092 mode_mask = ATA_DMA_MASK_CFA;
3093
3373efd8 3094 ata_dev_xfermask(dev);
1da177e4 3095
acf356b1
TH
3096 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3097 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3098
3099 if (libata_dma_mask & mode_mask)
3100 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3101 else
3102 dma_mask = 0;
3103
acf356b1
TH
3104 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3105 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3106
4f65977d 3107 found = 1;
5444a6f4
AC
3108 if (dev->dma_mode)
3109 used_dma = 1;
a6d5a51c 3110 }
4f65977d 3111 if (!found)
e82cbdb9 3112 goto out;
a6d5a51c
TH
3113
3114 /* step 2: always set host PIO timings */
f58229f8 3115 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3116 if (!ata_dev_enabled(dev))
3117 continue;
3118
3119 if (!dev->pio_mode) {
f15a1daf 3120 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3121 rc = -EINVAL;
e82cbdb9 3122 goto out;
e8e0619f
TH
3123 }
3124
3125 dev->xfer_mode = dev->pio_mode;
3126 dev->xfer_shift = ATA_SHIFT_PIO;
3127 if (ap->ops->set_piomode)
3128 ap->ops->set_piomode(ap, dev);
3129 }
1da177e4 3130
a6d5a51c 3131 /* step 3: set host DMA timings */
f58229f8 3132 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3133 if (!ata_dev_enabled(dev) || !dev->dma_mode)
3134 continue;
3135
3136 dev->xfer_mode = dev->dma_mode;
3137 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3138 if (ap->ops->set_dmamode)
3139 ap->ops->set_dmamode(ap, dev);
3140 }
1da177e4
LT
3141
3142 /* step 4: update devices' xfer mode */
f58229f8 3143 ata_link_for_each_dev(dev, link) {
18d90deb 3144 /* don't update suspended devices' xfer mode */
9666f400 3145 if (!ata_dev_enabled(dev))
83206a29
TH
3146 continue;
3147
3373efd8 3148 rc = ata_dev_set_mode(dev);
5bbc53f4 3149 if (rc)
e82cbdb9 3150 goto out;
83206a29 3151 }
1da177e4 3152
e8e0619f
TH
3153 /* Record simplex status. If we selected DMA then the other
3154 * host channels are not permitted to do so.
5444a6f4 3155 */
cca3974e 3156 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3157 ap->host->simplex_claimed = ap;
5444a6f4 3158
e82cbdb9
TH
3159 out:
3160 if (rc)
3161 *r_failed_dev = dev;
3162 return rc;
1da177e4
LT
3163}
3164
04351821
A
3165/**
3166 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3167 * @link: link on which timings will be programmed
04351821
A
3168 * @r_failed_dev: out paramter for failed device
3169 *
3170 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3171 * ata_set_mode() fails, pointer to the failing device is
3172 * returned in @r_failed_dev.
3173 *
3174 * LOCKING:
3175 * PCI/etc. bus probe sem.
3176 *
3177 * RETURNS:
3178 * 0 on success, negative errno otherwise
3179 */
0260731f 3180int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3181{
0260731f
TH
3182 struct ata_port *ap = link->ap;
3183
04351821
A
3184 /* has private set_mode? */
3185 if (ap->ops->set_mode)
0260731f
TH
3186 return ap->ops->set_mode(link, r_failed_dev);
3187 return ata_do_set_mode(link, r_failed_dev);
04351821
A
3188}
3189
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3209
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* 0xff means no device present; stop polling in that case too */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the impatience timeout: warn, then keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3264
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	/* never wait for 0xff longer than the caller's deadline */
	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3319
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* offline SATA link reading 0xff means nothing attached */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've polled >5s and >3s remain before deadline */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3361
/*
 *	ata_bus_post_reset - wait for devices to become ready after bus reset
 *	@ap: port that was reset
 *	@devmask: bit 0/1 set for each device found by ata_devchk
 *	@deadline: deadline jiffies for the operation
 *
 *	Waits for each present device's BSY bit to clear and reselects
 *	device 0.  -ENODEV from a wait is recorded but does not abort
 *	processing of the other device.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3421
/*
 *	ata_bus_softreset - issue ATA SRST via the device control register
 *	@ap: port to reset
 *	@devmask: devices found by ata_devchk, passed on to post-reset wait
 *	@deadline: deadline jiffies for the operation
 *
 *	Pulses SRST in the control register, waits, then hands off to
 *	ata_bus_post_reset().  Returns -ENODEV if the bus reads 0xFF.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3448
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		/* -ENODEV just means nothing answered; not fatal here */
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3536
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus is of interest */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3605
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET, set IPM to disallow power-saving transitions */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3641
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3704
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3764
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the SATA interface */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3824
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3892
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3937
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	/* extract NUL-terminated model and serial strings from both IDs */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
3986
3987/**
fe30911b 3988 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3989 * @dev: target ATA device
bff04647 3990 * @readid_flags: read ID flags
623a3128
TH
3991 *
3992 * Re-read IDENTIFY page and make sure @dev is still attached to
3993 * the port.
3994 *
3995 * LOCKING:
3996 * Kernel thread context (may sleep)
3997 *
3998 * RETURNS:
3999 * 0 on success, negative errno otherwise
4000 */
fe30911b 4001int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4002{
5eb45c02 4003 unsigned int class = dev->class;
9af5c9c9 4004 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4005 int rc;
4006
fe635c7e 4007 /* read ID data */
bff04647 4008 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4009 if (rc)
fe30911b 4010 return rc;
623a3128
TH
4011
4012 /* is the device still there? */
fe30911b
TH
4013 if (!ata_dev_same_device(dev, class, id))
4014 return -ENODEV;
623a3128 4015
fe635c7e 4016 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4017 return 0;
4018}
4019
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code reported by reset
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *	Also verifies that the device capacity did not change.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember old capacity so we can detect a changed device */
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed; a change means a different
	 * (or resized) disk was swapped in behind our back */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4084
6919a0a6
AC
/* One quirk ("horkage") entry.  A device matches when its IDENTIFY
 * model string matches @model_num and, if @model_rev is non-NULL, its
 * firmware revision matches @model_rev.  Both patterns may end in '*'
 * for a prefix match (see strn_pattern_cmp()). */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};

/* Table of known-broken devices, scanned linearly by
 * ata_dev_blacklisted(); terminated by an all-NULL entry. */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
	{ "Maxtor 7V300F0",	"VA111900",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 4184
/**
 *	strn_pattern_cmp - compare a name against a blacklist pattern
 *	@patt: pattern, optionally ending in @wildchar for a prefix match
 *	@name: device-reported string to test
 *	@wildchar: wildcard character (callers pass '*')
 *
 *	A trailing "@wildchar" in @patt matches any suffix of @name;
 *	otherwise an exact, full-length match is required.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp-style).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/*
	 * check for trailing wildcard: *\0 - compare only the prefix
	 * before the wildcard character
	 */
	p = strchr(patt, wildchar);
	if (p && p[1] == '\0')
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  The previous
	 * strncmp(patt, name, strlen(name)) erroneously matched any
	 * @name that was a proper prefix of @patt (e.g. "ST340"
	 * against pattern "ST340823A"). */
	return strcmp(patt, name);
}
4207
75683fe7 4208static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4209{
8bfa79fc
TH
4210 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4211 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4212 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4213
8bfa79fc
TH
4214 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4215 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4216
6919a0a6 4217 while (ad->model_num) {
539cc7c7 4218 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4219 if (ad->model_rev == NULL)
4220 return ad->horkage;
539cc7c7 4221 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4222 return ad->horkage;
f4b15fef 4223 }
6919a0a6 4224 ad++;
f4b15fef 4225 }
1da177e4
LT
4226 return 0;
4227}
4228
6919a0a6
AC
4229static int ata_dma_blacklisted(const struct ata_device *dev)
4230{
4231 /* We don't support polling DMA.
4232 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4233 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4234 */
9af5c9c9 4235 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4236 (dev->flags & ATA_DFLAG_CDB_INTR))
4237 return 1;
75683fe7 4238 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4239}
4240
6bbfd53d
AC
4241/**
4242 * ata_is_40wire - check drive side detection
4243 * @dev: device
4244 *
4245 * Perform drive side detection decoding, allowing for device vendors
4246 * who can't follow the documentation.
4247 */
4248
4249static int ata_is_40wire(struct ata_device *dev)
4250{
4251 if (dev->horkage & ATA_HORKAGE_IVB)
4252 return ata_drive_40wire_relaxed(dev->id);
4253 return ata_drive_40wire(dev->id);
4254}
4255
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available - intersect controller, configured
	 * device masks and what the IDENTIFY page advertises */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex hosts can run DMA on only one port at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4336
1da177e4
LT
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.  The mode programmed is taken from dev->xfer_mode.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* new transfer mode is passed in the sector count register */
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
9f45cbd3 4374/**
218f3d30 4375 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4376 * @dev: Device to which command will be sent
4377 * @enable: Whether to enable or disable the feature
218f3d30 4378 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4379 *
4380 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4381 * on port @ap with sector count
9f45cbd3
KCA
4382 *
4383 * LOCKING:
4384 * PCI/etc. bus probe sem.
4385 *
4386 * RETURNS:
4387 * 0 on success, AC_ERR_* mask otherwise.
4388 */
218f3d30
JG
4389static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4390 u8 feature)
9f45cbd3
KCA
4391{
4392 struct ata_taskfile tf;
4393 unsigned int err_mask;
4394
4395 /* set up set-features taskfile */
4396 DPRINTK("set features - SATA features\n");
4397
4398 ata_tf_init(dev, &tf);
4399 tf.command = ATA_CMD_SET_FEATURES;
4400 tf.feature = enable;
4401 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4402 tf.protocol = ATA_PROT_NODATA;
218f3d30 4403 tf.nsect = feature;
9f45cbd3 4404
2b789108 4405 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4406
83206a29
TH
4407 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4408 return err_mask;
1da177e4
LT
4409}
4410
8bf62ece
AL
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	Programs the device's CHS translation geometry.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4453
1da177e4 4454/**
0cba632b
JG
4455 * ata_sg_clean - Unmap DMA memory associated with command
4456 * @qc: Command containing DMA memory to be released
4457 *
4458 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4459 *
4460 * LOCKING:
cca3974e 4461 * spin_lock_irqsave(host lock)
1da177e4 4462 */
70e6ad0c 4463void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4464{
4465 struct ata_port *ap = qc->ap;
cedc9a47 4466 struct scatterlist *sg = qc->__sg;
1da177e4 4467 int dir = qc->dma_dir;
cedc9a47 4468 void *pad_buf = NULL;
1da177e4 4469
a4631474
TH
4470 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4471 WARN_ON(sg == NULL);
1da177e4
LT
4472
4473 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4474 WARN_ON(qc->n_elem > 1);
1da177e4 4475
2c13b7ce 4476 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4477
cedc9a47
JG
4478 /* if we padded the buffer out to 32-bit bound, and data
4479 * xfer direction is from-device, we must copy from the
4480 * pad buffer back into the supplied buffer
4481 */
4482 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4483 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4484
4485 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4486 if (qc->n_elem)
2f1f610b 4487 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47 4488 /* restore last sg */
87260216 4489 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
cedc9a47
JG
4490 if (pad_buf) {
4491 struct scatterlist *psg = &qc->pad_sgent;
45711f1a 4492 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4493 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4494 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4495 }
4496 } else {
2e242fa9 4497 if (qc->n_elem)
2f1f610b 4498 dma_unmap_single(ap->dev,
e1410f2d
JG
4499 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4500 dir);
cedc9a47
JG
4501 /* restore sg */
4502 sg->length += qc->pad_len;
4503 if (pad_buf)
4504 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4505 pad_buf, qc->pad_len);
4506 }
1da177e4
LT
4507
4508 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4509 qc->__sg = NULL;
1da177e4
LT
4510}
4511
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Each PRD entry may
 *	not cross a 64K boundary, so large segments are split.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4563
d26fc955
AC
4564/**
4565 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4566 * @qc: Metadata associated with taskfile to be transferred
4567 *
4568 * Fill PCI IDE PRD (scatter-gather) table with segments
4569 * associated with the current disk command. Perform the fill
4570 * so that we avoid writing any length 64K records for
4571 * controllers that don't follow the spec.
4572 *
4573 * LOCKING:
4574 * spin_lock_irqsave(host lock)
4575 *
4576 */
4577static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4578{
4579 struct ata_port *ap = qc->ap;
4580 struct scatterlist *sg;
4581 unsigned int idx;
4582
4583 WARN_ON(qc->__sg == NULL);
4584 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4585
4586 idx = 0;
4587 ata_for_each_sg(sg, qc) {
4588 u32 addr, offset;
4589 u32 sg_len, len, blen;
4590
2dcb407e 4591 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4592 * Note h/w doesn't support 64-bit, so we unconditionally
4593 * truncate dma_addr_t to u32.
4594 */
4595 addr = (u32) sg_dma_address(sg);
4596 sg_len = sg_dma_len(sg);
4597
4598 while (sg_len) {
4599 offset = addr & 0xffff;
4600 len = sg_len;
4601 if ((offset + sg_len) > 0x10000)
4602 len = 0x10000 - offset;
4603
4604 blen = len & 0xffff;
4605 ap->prd[idx].addr = cpu_to_le32(addr);
4606 if (blen == 0) {
4607 /* Some PATA chipsets like the CS5530 can't
4608 cope with 0x0000 meaning 64K as the spec says */
4609 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4610 blen = 0x8000;
4611 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4612 }
4613 ap->prd[idx].flags_len = cpu_to_le32(blen);
4614 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4615
4616 idx++;
4617 sg_len -= len;
4618 addr += len;
4619 }
4620 }
4621
4622 if (idx)
4623 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4624}
4625
1da177e4
LT
4626/**
4627 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4628 * @qc: Metadata associated with taskfile to check
4629 *
780a87f7
JG
4630 * Allow low-level driver to filter ATA PACKET commands, returning
4631 * a status indicating whether or not it is OK to use DMA for the
4632 * supplied PACKET command.
4633 *
1da177e4 4634 * LOCKING:
cca3974e 4635 * spin_lock_irqsave(host lock)
0cba632b 4636 *
1da177e4
LT
4637 * RETURNS: 0 when ATAPI DMA can be used
4638 * nonzero otherwise
4639 */
4640int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4641{
4642 struct ata_port *ap = qc->ap;
b9a4197e
TH
4643
4644 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4645 * few ATAPI devices choke on such DMA requests.
4646 */
4647 if (unlikely(qc->nbytes & 15))
4648 return 1;
6f23a31d 4649
1da177e4 4650 if (ap->ops->check_atapi_dma)
b9a4197e 4651 return ap->ops->check_atapi_dma(qc);
1da177e4 4652
b9a4197e 4653 return 0;
1da177e4 4654}
b9a4197e 4655
31cc23b3
TH
4656/**
4657 * ata_std_qc_defer - Check whether a qc needs to be deferred
4658 * @qc: ATA command in question
4659 *
4660 * Non-NCQ commands cannot run with any other command, NCQ or
4661 * not. As upper layer only knows the queue depth, we are
4662 * responsible for maintaining exclusion. This function checks
4663 * whether a new command @qc can be issued.
4664 *
4665 * LOCKING:
4666 * spin_lock_irqsave(host lock)
4667 *
4668 * RETURNS:
4669 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4670 */
4671int ata_std_qc_defer(struct ata_queued_cmd *qc)
4672{
4673 struct ata_link *link = qc->dev->link;
4674
4675 if (qc->tf.protocol == ATA_PROT_NCQ) {
4676 if (!ata_tag_valid(link->active_tag))
4677 return 0;
4678 } else {
4679 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4680 return 0;
4681 }
4682
4683 return ATA_DEFER_LINK;
4684}
4685
1da177e4
LT
4686/**
4687 * ata_qc_prep - Prepare taskfile for submission
4688 * @qc: Metadata associated with taskfile to be prepared
4689 *
780a87f7
JG
4690 * Prepare ATA taskfile for submission.
4691 *
1da177e4 4692 * LOCKING:
cca3974e 4693 * spin_lock_irqsave(host lock)
1da177e4
LT
4694 */
4695void ata_qc_prep(struct ata_queued_cmd *qc)
4696{
4697 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4698 return;
4699
4700 ata_fill_sg(qc);
4701}
4702
d26fc955
AC
4703/**
4704 * ata_dumb_qc_prep - Prepare taskfile for submission
4705 * @qc: Metadata associated with taskfile to be prepared
4706 *
4707 * Prepare ATA taskfile for submission.
4708 *
4709 * LOCKING:
4710 * spin_lock_irqsave(host lock)
4711 */
4712void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4713{
4714 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4715 return;
4716
4717 ata_fill_sg_dumb(qc);
4718}
4719
e46834cd
BK
/* ->qc_prep stub for controllers that need no PRD/descriptor setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4721
0cba632b
JG
4722/**
4723 * ata_sg_init_one - Associate command with memory buffer
4724 * @qc: Command to be associated
4725 * @buf: Memory buffer
4726 * @buflen: Length of memory buffer, in bytes.
4727 *
4728 * Initialize the data-related elements of queued_cmd @qc
4729 * to point to a single memory buffer, @buf of byte length @buflen.
4730 *
4731 * LOCKING:
cca3974e 4732 * spin_lock_irqsave(host lock)
0cba632b
JG
4733 */
4734
1da177e4
LT
4735void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4736{
1da177e4
LT
4737 qc->flags |= ATA_QCFLAG_SINGLE;
4738
cedc9a47 4739 qc->__sg = &qc->sgent;
1da177e4 4740 qc->n_elem = 1;
cedc9a47 4741 qc->orig_n_elem = 1;
1da177e4 4742 qc->buf_virt = buf;
233277ca 4743 qc->nbytes = buflen;
87260216 4744 qc->cursg = qc->__sg;
1da177e4 4745
61c0596c 4746 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4747}
4748
0cba632b
JG
4749/**
4750 * ata_sg_init - Associate command with scatter-gather table.
4751 * @qc: Command to be associated
4752 * @sg: Scatter-gather table.
4753 * @n_elem: Number of elements in s/g table.
4754 *
4755 * Initialize the data-related elements of queued_cmd @qc
4756 * to point to a scatter-gather table @sg, containing @n_elem
4757 * elements.
4758 *
4759 * LOCKING:
cca3974e 4760 * spin_lock_irqsave(host lock)
0cba632b
JG
4761 */
4762
1da177e4
LT
4763void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4764 unsigned int n_elem)
4765{
4766 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4767 qc->__sg = sg;
1da177e4 4768 qc->n_elem = n_elem;
cedc9a47 4769 qc->orig_n_elem = n_elem;
87260216 4770 qc->cursg = qc->__sg;
1da177e4
LT
4771}
4772
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc,
 *	padding the transfer out to a 32-bit boundary via the port's
 *	per-tag pad buffer when needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands may need pad-out */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-copy the tail into the pad buffer;
		 * for reads ata_sg_clean() copies it back afterwards */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* whole buffer went into the pad - nothing left to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4841
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc,
 *	padding the final element out to a 32-bit boundary via the
 *	port's per-tag pad buffer when needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands may need pad-out */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_init_table(psg, 1);
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
			    qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* atomic kmap: we hold the host lock */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last element if it was consumed entirely by the pad */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4927
0baab86b 4928/**
c893a3ae 4929 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4930 * @buf: Buffer to swap
4931 * @buf_words: Number of 16-bit words in buffer.
4932 *
4933 * Swap halves of 16-bit words if needed to convert from
4934 * little-endian byte order to native cpu byte order, or
4935 * vice-versa.
4936 *
4937 * LOCKING:
6f0ef4fa 4938 * Inherited from caller.
0baab86b 4939 */
1da177e4
LT
4940void swap_buf_le16(u16 *buf, unsigned int buf_words)
4941{
4942#ifdef __BIG_ENDIAN
4943 unsigned int i;
4944
4945 for (i = 0; i < buf_words; i++)
4946 buf[i] = le16_to_cpu(buf[i]);
4947#endif /* __BIG_ENDIAN */
4948}
4949
6ae4cfb5 4950/**
0d5ff566 4951 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4952 * @adev: device to target
6ae4cfb5
AL
4953 * @buf: data buffer
4954 * @buflen: buffer length
344babaa 4955 * @write_data: read/write
6ae4cfb5
AL
4956 *
4957 * Transfer data from/to the device data register by PIO.
4958 *
4959 * LOCKING:
4960 * Inherited from caller.
6ae4cfb5 4961 */
0d5ff566
TH
4962void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4963 unsigned int buflen, int write_data)
1da177e4 4964{
9af5c9c9 4965 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4966 unsigned int words = buflen >> 1;
1da177e4 4967
6ae4cfb5 4968 /* Transfer multiple of 2 bytes */
1da177e4 4969 if (write_data)
0d5ff566 4970 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4971 else
0d5ff566 4972 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4973
4974 /* Transfer trailing 1 byte, if any. */
4975 if (unlikely(buflen & 0x01)) {
4976 u16 align_buf[1] = { 0 };
4977 unsigned char *trailing_buf = buf + buflen - 1;
4978
4979 if (write_data) {
4980 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4981 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4982 } else {
0d5ff566 4983 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4984 memcpy(trailing_buf, align_buf, 1);
4985 }
4986 }
1da177e4
LT
4987}
4988
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with local interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
5010
5011
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	at the current scatter-gather position, and advance the
 *	cursg/cursg_ofs/curbytes bookkeeping.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector: move the HSM to its last state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* highmem page: map it atomically with irqs off (KM_IRQ0
		 * slot) for the duration of the transfer.
		 */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted: step to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5067
07f6f7d0 5068/**
5a5dbd18 5069 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5070 * @qc: Command on going
5071 *
5a5dbd18 5072 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5073 * ATA device for the DRQ request.
5074 *
5075 * LOCKING:
5076 * Inherited from caller.
5077 */
1da177e4 5078
07f6f7d0
AL
5079static void ata_pio_sectors(struct ata_queued_cmd *qc)
5080{
5081 if (is_multi_taskfile(&qc->tf)) {
5082 /* READ/WRITE MULTIPLE */
5083 unsigned int nsect;
5084
587005de 5085 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5086
5a5dbd18 5087 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5088 qc->dev->multi_count);
07f6f7d0
AL
5089 while (nsect--)
5090 ata_pio_sector(qc);
5091 } else
5092 ata_pio_sector(qc);
4cc980b3
AL
5093
5094 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5095}
5096
c71c1857
AL
5097/**
5098 * atapi_send_cdb - Write CDB bytes to hardware
5099 * @ap: Port to which ATAPI device is attached.
5100 * @qc: Taskfile currently active
5101 *
5102 * When device has indicated its readiness to accept
5103 * a CDB, this function is called. Send the CDB.
5104 *
5105 * LOCKING:
5106 * caller.
5107 */
5108
5109static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5110{
5111 /* send SCSI cdb */
5112 DPRINTK("send cdb\n");
db024d53 5113 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 5114
a6b2c5d4 5115 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
5116 ata_altstatus(ap); /* flush */
5117
5118 switch (qc->tf.protocol) {
5119 case ATA_PROT_ATAPI:
5120 ap->hsm_task_state = HSM_ST;
5121 break;
5122 case ATA_PROT_ATAPI_NODATA:
5123 ap->hsm_task_state = HSM_ST_LAST;
5124 break;
5125 case ATA_PROT_ATAPI_DMA:
5126 ap->hsm_task_state = HSM_ST_LAST;
5127 /* initiate bmdma */
5128 ap->ops->bmdma_start(qc);
5129 break;
5130 }
1da177e4
LT
5131}
5132
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatter-gather list one page-bounded chunk at a time until
 *	@bytes have been moved.  If the device requests more data than
 *	the sg list holds, the excess is discarded (read) or zero-padded
 *	(write) so the byte count register is still honoured.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;
	int no_more_sg = 0;

	/* everything requested will have been moved: last HSM state next */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(no_more_sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = qc->cursg;

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* highmem page: map atomically with irqs off (KM_IRQ0) */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* advance to the next sg entry; remember when we pass the last one */
	if (qc->cursg_ofs == sg->length) {
		if (qc->cursg == lsg)
			no_more_sg = 1;

		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
5232
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the interrupt reason and byte count from the device,
 *	validate them, and transfer that many bytes from/to the
 *	ATAPI device.  On a bad ireason the HSM is moved to
 *	HSM_ST_ERR with AC_ERR_HSM set.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5283
5284/**
c234fb00
AL
5285 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5286 * @ap: the target ata_port
5287 * @qc: qc on going
1da177e4 5288 *
c234fb00
AL
5289 * RETURNS:
5290 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5291 */
c234fb00
AL
5292
5293static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5294{
c234fb00
AL
5295 if (qc->tf.flags & ATA_TFLAG_POLLING)
5296 return 1;
1da177e4 5297
c234fb00
AL
5298 if (ap->hsm_task_state == HSM_ST_FIRST) {
5299 if (qc->tf.protocol == ATA_PROT_PIO &&
5300 (qc->tf.flags & ATA_TFLAG_WRITE))
5301 return 1;
1da177e4 5302
c234fb00
AL
5303 if (is_atapi_taskfile(&qc->tf) &&
5304 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5305 return 1;
fe79e683
AL
5306 }
5307
c234fb00
AL
5308 return 0;
5309}
1da177e4 5310
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  With new-style
 *	EH, an HSM violation (AC_ERR_HSM) freezes the port instead of
 *	completing the qc.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable the port irq before completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: always complete, no freezing */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5360
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one step: send the
 *	first data block or CDB (HSM_ST_FIRST), move data blocks
 *	(HSM_ST), and complete or fail the command (HSM_ST_LAST /
 *	HSM_ST_ERR).
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrputed */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5602
65f27f38 5603static void ata_pio_task(struct work_struct *work)
8061f5f0 5604{
65f27f38
DH
5605 struct ata_port *ap =
5606 container_of(work, struct ata_port, port_task.work);
5607 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5608 u8 status;
a1af3734 5609 int poll_next;
8061f5f0 5610
7fb6ec28 5611fsm_start:
a1af3734 5612 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5613
a1af3734
AL
5614 /*
5615 * This is purely heuristic. This is a fast path.
5616 * Sometimes when we enter, BSY will be cleared in
5617 * a chk-status or two. If not, the drive is probably seeking
5618 * or something. Snooze for a couple msecs, then
5619 * chk-status again. If still busy, queue delayed work.
5620 */
5621 status = ata_busy_wait(ap, ATA_BUSY, 5);
5622 if (status & ATA_BUSY) {
5623 msleep(2);
5624 status = ata_busy_wait(ap, ATA_BUSY, 10);
5625 if (status & ATA_BUSY) {
31ce6dae 5626 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5627 return;
5628 }
8061f5f0
TH
5629 }
5630
a1af3734
AL
5631 /* move the HSM */
5632 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5633
a1af3734
AL
5634 /* another command or interrupt handler
5635 * may be running at this point.
5636 */
5637 if (poll_next)
7fb6ec28 5638 goto fsm_start;
8061f5f0
TH
5639}
5640
1da177e4
LT
5641/**
5642 * ata_qc_new - Request an available ATA command, for queueing
5643 * @ap: Port associated with device @dev
5644 * @dev: Device from whom we request an available command structure
5645 *
5646 * LOCKING:
0cba632b 5647 * None.
1da177e4
LT
5648 */
5649
5650static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5651{
5652 struct ata_queued_cmd *qc = NULL;
5653 unsigned int i;
5654
e3180499 5655 /* no command while frozen */
b51e9e5d 5656 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5657 return NULL;
5658
2ab7db1f
TH
5659 /* the last tag is reserved for internal command. */
5660 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5661 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5662 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5663 break;
5664 }
5665
5666 if (qc)
5667 qc->tag = i;
5668
5669 return qc;
5670}
5671
5672/**
5673 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5674 * @dev: Device from whom we request an available command structure
5675 *
5676 * LOCKING:
0cba632b 5677 * None.
1da177e4
LT
5678 */
5679
3373efd8 5680struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5681{
9af5c9c9 5682 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5683 struct ata_queued_cmd *qc;
5684
5685 qc = ata_qc_new(ap);
5686 if (qc) {
1da177e4
LT
5687 qc->scsicmd = NULL;
5688 qc->ap = ap;
5689 qc->dev = dev;
1da177e4 5690
2c13b7ce 5691 ata_qc_reinit(qc);
1da177e4
LT
5692 }
5693
5694 return qc;
5695}
5696
1da177e4
LT
5697/**
5698 * ata_qc_free - free unused ata_queued_cmd
5699 * @qc: Command to complete
5700 *
5701 * Designed to free unused ata_queued_cmd object
5702 * in case something prevents using it.
5703 *
5704 * LOCKING:
cca3974e 5705 * spin_lock_irqsave(host lock)
1da177e4
LT
5706 */
5707void ata_qc_free(struct ata_queued_cmd *qc)
5708{
4ba946e9
TH
5709 struct ata_port *ap = qc->ap;
5710 unsigned int tag;
5711
a4631474 5712 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5713
4ba946e9
TH
5714 qc->flags = 0;
5715 tag = qc->tag;
5716 if (likely(ata_tag_valid(tag))) {
4ba946e9 5717 qc->tag = ATA_TAG_POISON;
6cec4a39 5718 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5719 }
1da177e4
LT
5720}
5721
/*
 * __ata_qc_complete - low-level qc completion: unmap DMA, retire the
 * tag from the link/port active-command bookkeeping, then invoke the
 * qc's completion callback.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* link counts as active while any NCQ tag is outstanding */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5758
39599a53
TH
5759static void fill_result_tf(struct ata_queued_cmd *qc)
5760{
5761 struct ata_port *ap = qc->ap;
5762
39599a53 5763 qc->result_tf.flags = qc->tf.flags;
4742d54f 5764 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5765}
5766
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *	With new-style EH, failed qcs are routed to the error handler;
 *	a few commands (SET FEATURES write-cache, INIT DEV PARAMS,
 *	SET MULTI, SLEEP) get post-processing on success.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are completed normally even on
			 * failure; EH handles them through their own path
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5850
dedaf2b0
TH
5851/**
5852 * ata_qc_complete_multiple - Complete multiple qcs successfully
5853 * @ap: port in question
5854 * @qc_active: new qc_active mask
5855 * @finish_qc: LLDD callback invoked before completing a qc
5856 *
5857 * Complete in-flight commands. This functions is meant to be
5858 * called from low-level driver's interrupt routine to complete
5859 * requests normally. ap->qc_active and @qc_active is compared
5860 * and commands are completed accordingly.
5861 *
5862 * LOCKING:
cca3974e 5863 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5864 *
5865 * RETURNS:
5866 * Number of completed commands on success, -errno otherwise.
5867 */
5868int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5869 void (*finish_qc)(struct ata_queued_cmd *))
5870{
5871 int nr_done = 0;
5872 u32 done_mask;
5873 int i;
5874
5875 done_mask = ap->qc_active ^ qc_active;
5876
5877 if (unlikely(done_mask & qc_active)) {
5878 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5879 "(%08x->%08x)\n", ap->qc_active, qc_active);
5880 return -EINVAL;
5881 }
5882
5883 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5884 struct ata_queued_cmd *qc;
5885
5886 if (!(done_mask & (1 << i)))
5887 continue;
5888
5889 if ((qc = ata_qc_from_tag(ap, i))) {
5890 if (finish_qc)
5891 finish_qc(qc);
5892 ata_qc_complete(qc);
5893 nr_done++;
5894 }
5895 }
5896
5897 return nr_done;
5898}
5899
1da177e4
LT
5900static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5901{
5902 struct ata_port *ap = qc->ap;
5903
5904 switch (qc->tf.protocol) {
3dc1d881 5905 case ATA_PROT_NCQ:
1da177e4
LT
5906 case ATA_PROT_DMA:
5907 case ATA_PROT_ATAPI_DMA:
5908 return 1;
5909
5910 case ATA_PROT_ATAPI:
5911 case ATA_PROT_PIO:
1da177e4
LT
5912 if (ap->flags & ATA_FLAG_PIO_DMA)
5913 return 1;
5914
5915 /* fall through */
5916
5917 default:
5918 return 0;
5919 }
5920
5921 /* never reached */
5922}
5923
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: track this tag in sactive; the link counts as
		 * active once its first NCQ command goes out */
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		/* non-NCQ: exactly one command per link at a time */
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map data buffers for DMA if the protocol needs it */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	/* a non-zero err_mask from qc_issue means the command never
	 * started; complete it immediately with the error */
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5996
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; each protocol sets the HSM state that the
	 * interrupt handler / ata_pio_task() will continue from */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
6128
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus: reading it does not clear INTRQ, so a BUSY
	 * device can be detected without acking the interrupt */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the host state machine with the status just read */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	/* debug aid: periodically ack a screaming interrupt line so the
	 * system stays usable even when the irq is not ours */
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
6230
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch to ata_host_intr() for a command
			 * that is active and interrupt-driven; polled
			 * commands are serviced by ata_pio_task() */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
6275
34bf2170
TH
6276/**
6277 * sata_scr_valid - test whether SCRs are accessible
936fd732 6278 * @link: ATA link to test SCR accessibility for
34bf2170 6279 *
936fd732 6280 * Test whether SCRs are accessible for @link.
34bf2170
TH
6281 *
6282 * LOCKING:
6283 * None.
6284 *
6285 * RETURNS:
6286 * 1 if SCRs are accessible, 0 otherwise.
6287 */
936fd732 6288int sata_scr_valid(struct ata_link *link)
34bf2170 6289{
936fd732
TH
6290 struct ata_port *ap = link->ap;
6291
a16abc0b 6292 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6293}
6294
6295/**
6296 * sata_scr_read - read SCR register of the specified port
936fd732 6297 * @link: ATA link to read SCR for
34bf2170
TH
6298 * @reg: SCR to read
6299 * @val: Place to store read value
6300 *
936fd732 6301 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6302 * guaranteed to succeed if @link is ap->link, the cable type of
6303 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6304 *
6305 * LOCKING:
633273a3 6306 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6307 *
6308 * RETURNS:
6309 * 0 on success, negative errno on failure.
6310 */
936fd732 6311int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6312{
633273a3
TH
6313 if (ata_is_host_link(link)) {
6314 struct ata_port *ap = link->ap;
936fd732 6315
633273a3
TH
6316 if (sata_scr_valid(link))
6317 return ap->ops->scr_read(ap, reg, val);
6318 return -EOPNOTSUPP;
6319 }
6320
6321 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6322}
6323
6324/**
6325 * sata_scr_write - write SCR register of the specified port
936fd732 6326 * @link: ATA link to write SCR for
34bf2170
TH
6327 * @reg: SCR to write
6328 * @val: value to write
6329 *
936fd732 6330 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6331 * guaranteed to succeed if @link is ap->link, the cable type of
6332 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6333 *
6334 * LOCKING:
633273a3 6335 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6336 *
6337 * RETURNS:
6338 * 0 on success, negative errno on failure.
6339 */
936fd732 6340int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6341{
633273a3
TH
6342 if (ata_is_host_link(link)) {
6343 struct ata_port *ap = link->ap;
6344
6345 if (sata_scr_valid(link))
6346 return ap->ops->scr_write(ap, reg, val);
6347 return -EOPNOTSUPP;
6348 }
936fd732 6349
633273a3 6350 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6351}
6352
6353/**
6354 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6355 * @link: ATA link to write SCR for
34bf2170
TH
6356 * @reg: SCR to write
6357 * @val: value to write
6358 *
6359 * This function is identical to sata_scr_write() except that this
6360 * function performs flush after writing to the register.
6361 *
6362 * LOCKING:
633273a3 6363 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6364 *
6365 * RETURNS:
6366 * 0 on success, negative errno on failure.
6367 */
936fd732 6368int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6369{
633273a3
TH
6370 if (ata_is_host_link(link)) {
6371 struct ata_port *ap = link->ap;
6372 int rc;
da3dbb17 6373
633273a3
TH
6374 if (sata_scr_valid(link)) {
6375 rc = ap->ops->scr_write(ap, reg, val);
6376 if (rc == 0)
6377 rc = ap->ops->scr_read(ap, reg, &val);
6378 return rc;
6379 }
6380 return -EOPNOTSUPP;
34bf2170 6381 }
633273a3
TH
6382
6383 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6384}
6385
6386/**
936fd732
TH
6387 * ata_link_online - test whether the given link is online
6388 * @link: ATA link to test
34bf2170 6389 *
936fd732
TH
6390 * Test whether @link is online. Note that this function returns
6391 * 0 if online status of @link cannot be obtained, so
6392 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6393 *
6394 * LOCKING:
6395 * None.
6396 *
6397 * RETURNS:
6398 * 1 if the port online status is available and online.
6399 */
936fd732 6400int ata_link_online(struct ata_link *link)
34bf2170
TH
6401{
6402 u32 sstatus;
6403
936fd732
TH
6404 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6405 (sstatus & 0xf) == 0x3)
34bf2170
TH
6406 return 1;
6407 return 0;
6408}
6409
6410/**
936fd732
TH
6411 * ata_link_offline - test whether the given link is offline
6412 * @link: ATA link to test
34bf2170 6413 *
936fd732
TH
6414 * Test whether @link is offline. Note that this function
6415 * returns 0 if offline status of @link cannot be obtained, so
6416 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6417 *
6418 * LOCKING:
6419 * None.
6420 *
6421 * RETURNS:
6422 * 1 if the port offline status is available and offline.
6423 */
936fd732 6424int ata_link_offline(struct ata_link *link)
34bf2170
TH
6425{
6426 u32 sstatus;
6427
936fd732
TH
6428 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6429 (sstatus & 0xf) != 0x3)
34bf2170
TH
6430 return 1;
6431 return 0;
6432}
0baab86b 6433
77b08fb5 6434int ata_flush_cache(struct ata_device *dev)
9b847548 6435{
977e6b9f 6436 unsigned int err_mask;
9b847548
JA
6437 u8 cmd;
6438
6439 if (!ata_try_flush_cache(dev))
6440 return 0;
6441
6fc49adb 6442 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6443 cmd = ATA_CMD_FLUSH_EXT;
6444 else
6445 cmd = ATA_CMD_FLUSH;
6446
4f34337b
AC
6447 /* This is wrong. On a failed flush we get back the LBA of the lost
6448 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6449 a further flush command to continue the writeback until it
4f34337b 6450 does not error */
977e6b9f
TH
6451 err_mask = ata_do_simple_cmd(dev, cmd);
6452 if (err_mask) {
6453 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6454 return -EIO;
6455 }
6456
6457 return 0;
9b847548
JA
6458}
6459
6ffa01d8 6460#ifdef CONFIG_PM
/* Request a PM operation (@mesg) from EH on every port of @host.
 * @action/@ehi_flags are merged into each link's eh_info; when @wait is
 * non-zero the function waits for EH to finish each port and returns
 * the first per-port error.  Returns 0 on success, -errno otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports its result through *ap->pm_result */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6510
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/* wait=1: block until EH has suspended every port */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
6541
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait=0: resumes run in parallel; EH performs a softreset */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
6ffa01d8 6562#endif
500530f6 6563
c893a3ae
RD
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* device-managed allocation: freed automatically on driver
	 * detach, so no explicit cleanup path is needed here */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6594
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* clear everything past ATA_DEVICE_CLEAR_OFFSET; fields before
	 * the offset (link, devno, persistent flags) must survive */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* no restrictions until the transfer masks are probed */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6629
4fb37a25
TH
6630/**
6631 * ata_link_init - Initialize an ata_link structure
6632 * @ap: ATA port link is attached to
6633 * @link: Link structure to initialize
8989805d 6634 * @pmp: Port multiplier port number
4fb37a25
TH
6635 *
6636 * Initialize @link.
6637 *
6638 * LOCKING:
6639 * Kernel thread context (may sleep)
6640 */
fb7fd614 6641void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6642{
6643 int i;
6644
6645 /* clear everything except for devices */
6646 memset(link, 0, offsetof(struct ata_link, device[0]));
6647
6648 link->ap = ap;
8989805d 6649 link->pmp = pmp;
4fb37a25
TH
6650 link->active_tag = ATA_TAG_POISON;
6651 link->hw_sata_spd_limit = UINT_MAX;
6652
6653 /* can't use iterator, ap isn't initialized yet */
6654 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6655 struct ata_device *dev = &link->device[i];
6656
6657 dev->link = link;
6658 dev->devno = dev - link->device;
6659 ata_dev_init(dev);
6660 }
6661}
6662
6663/**
6664 * sata_link_init_spd - Initialize link->sata_spd_limit
6665 * @link: Link to configure sata_spd_limit for
6666 *
6667 * Initialize @link->[hw_]sata_spd_limit to the currently
6668 * configured value.
6669 *
6670 * LOCKING:
6671 * Kernel thread context (may sleep).
6672 *
6673 * RETURNS:
6674 * 0 on success, -errno on failure.
6675 */
fb7fd614 6676int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6677{
6678 u32 scontrol, spd;
6679 int rc;
6680
6681 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6682 if (rc)
6683 return rc;
6684
6685 spd = (scontrol >> 4) & 0xf;
6686 if (spd)
6687 link->hw_sata_spd_limit &= (1 << spd) - 1;
6688
6689 link->sata_spd_limit = link->hw_sata_spd_limit;
6690
6691 return 0;
6692}
6693
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port stays DISABLED/INITIALIZING until probing brings it up */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable timer used by EH to drain in-flight commands */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link has pmp number 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6753
f0d36efd
TH
/* devres release callback for an ata_host: drop each port's SCSI host
 * reference, free per-port data and clear the drvdata pointer.  Runs
 * automatically when the owning device is unbound.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* ports array may be sparse if allocation failed midway */
		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6775
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	/* group all devres allocations so a partial failure can be
	 * rolled back in one devres_release_group() call */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * one extra NULL slot terminates the array */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6840
f5cda257
TH
6841/**
6842 * ata_host_alloc_pinfo - alloc host and init with port_info array
6843 * @dev: generic device this host is associated with
6844 * @ppi: array of ATA port_info to initialize host with
6845 * @n_ports: number of ATA ports attached to this host
6846 *
6847 * Allocate ATA host and initialize with info from @ppi. If NULL
6848 * terminated, @ppi may contain fewer entries than @n_ports. The
6849 * last entry will be used for the remaining ports.
6850 *
6851 * RETURNS:
6852 * Allocate ATA host on success, NULL on failure.
6853 *
6854 * LOCKING:
6855 * Inherited from calling layer (may sleep).
6856 */
6857struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6858 const struct ata_port_info * const * ppi,
6859 int n_ports)
6860{
6861 const struct ata_port_info *pi;
6862 struct ata_host *host;
6863 int i, j;
6864
6865 host = ata_host_alloc(dev, n_ports);
6866 if (!host)
6867 return NULL;
6868
6869 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6870 struct ata_port *ap = host->ports[i];
6871
6872 if (ppi[j])
6873 pi = ppi[j++];
6874
6875 ap->pio_mask = pi->pio_mask;
6876 ap->mwdma_mask = pi->mwdma_mask;
6877 ap->udma_mask = pi->udma_mask;
6878 ap->flags |= pi->flags;
0c88758b 6879 ap->link.flags |= pi->link_flags;
f5cda257
TH
6880 ap->ops = pi->port_ops;
6881
6882 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6883 host->ops = pi->port_ops;
6884 if (!host->private_data && pi->private_data)
6885 host->private_data = pi->private_data;
6886 }
6887
6888 return host;
6889}
6890
32ebbc0c
TH
/* devres callback: stop every port and then the host itself.  Only
 * installed by ata_host_start() when some stop hook actually exists.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	/* host-level stop runs after all per-port stops */
	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6908
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent: a second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	/* first pass: pick up default host ops and find out whether
	 * any stop hook exists (which requires a devres action) */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop action up front so starting can't fail
	 * after ports are already running */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports started so far, in reverse */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6983
b03732f0 6984/**
cca3974e
JG
6985 * ata_sas_host_init - Initialize a host struct
6986 * @host: host to initialize
6987 * @dev: device host is attached to
6988 * @flags: host flags
6989 * @ops: port_ops
b03732f0
BK
6990 *
6991 * LOCKING:
6992 * PCI/etc. bus probe sem.
6993 *
6994 */
f3187195 6995/* KILLME - the only user left is ipr */
cca3974e
JG
6996void ata_host_init(struct ata_host *host, struct device *dev,
6997 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6998{
cca3974e
JG
6999 spin_lock_init(&host->lock);
7000 host->dev = dev;
7001 host->flags = flags;
7002 host->ops = ops;
b03732f0
BK
7003}
7004
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 * NOTE(review): the freed pointers are left in host->ports[];
	 * nothing below iterates past n_ports, but don't start.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;	/* shadows the outer rc on purpose; probe errors
			 * here are deliberately not propagated */

		/* probe: new-style (EH-driven) or legacy bus probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
7134
f5cda257
TH
7135/**
7136 * ata_host_activate - start host, request IRQ and register it
7137 * @host: target ATA host
7138 * @irq: IRQ to request
7139 * @irq_handler: irq_handler used when requesting IRQ
7140 * @irq_flags: irq_flags used when requesting IRQ
7141 * @sht: scsi_host_template to use when registering the host
7142 *
7143 * After allocating an ATA host and initializing it, most libata
7144 * LLDs perform three steps to activate the host - start host,
7145 * request IRQ and register it. This helper takes necessasry
7146 * arguments and performs the three steps in one go.
7147 *
3d46b2e2
PM
7148 * An invalid IRQ skips the IRQ registration and expects the host to
7149 * have set polling mode on the port. In this case, @irq_handler
7150 * should be NULL.
7151 *
f5cda257
TH
7152 * LOCKING:
7153 * Inherited from calling layer (may sleep).
7154 *
7155 * RETURNS:
7156 * 0 on success, -errno otherwise.
7157 */
7158int ata_host_activate(struct ata_host *host, int irq,
7159 irq_handler_t irq_handler, unsigned long irq_flags,
7160 struct scsi_host_template *sht)
7161{
cbcdd875 7162 int i, rc;
f5cda257
TH
7163
7164 rc = ata_host_start(host);
7165 if (rc)
7166 return rc;
7167
3d46b2e2
PM
7168 /* Special case for polling mode */
7169 if (!irq) {
7170 WARN_ON(irq_handler);
7171 return ata_host_register(host, sht);
7172 }
7173
f5cda257
TH
7174 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7175 dev_driver_string(host->dev), host);
7176 if (rc)
7177 return rc;
7178
cbcdd875
TH
7179 for (i = 0; i < host->n_ports; i++)
7180 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7181
f5cda257
TH
7182 rc = ata_host_register(host, sht);
7183 /* if failed, just free the IRQ and leave ports alone */
7184 if (rc)
7185 devm_free_irq(host->dev, irq, host);
7186
7187 return rc;
7188}
7189
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* legacy (non-EH) ports have no EH machinery to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* hotplug work may still be queued; kill it before the host goes */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7244
0529c159
TH
7245/**
7246 * ata_host_detach - Detach all ports of an ATA host
7247 * @host: Host to detach
7248 *
7249 * Detach all ports of @host.
7250 *
7251 * LOCKING:
7252 * Kernel thread context (may sleep).
7253 */
7254void ata_host_detach(struct ata_host *host)
7255{
7256 int i;
7257
7258 for (i = 0; i < host->n_ports; i++)
7259 ata_port_detach(host->ports[i]);
7260}
7261
1da177e4
LT
7262/**
7263 * ata_std_ports - initialize ioaddr with standard port offsets.
7264 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7265 *
7266 * Utility function which initializes data_addr, error_addr,
7267 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7268 * device_addr, status_addr, and command_addr to standard offsets
7269 * relative to cmd_addr.
7270 *
7271 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7272 */
0baab86b 7273
1da177e4
LT
7274void ata_std_ports(struct ata_ioports *ioaddr)
7275{
7276 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7277 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7278 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7279 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7280 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7281 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7282 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7283 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7284 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7285 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7286}
7287
0baab86b 7288
374b1873
JG
7289#ifdef CONFIG_PCI
7290
1da177e4
LT
7291/**
7292 * ata_pci_remove_one - PCI layer callback for device removal
7293 * @pdev: PCI device that was removed
7294 *
b878ca5d
TH
7295 * PCI layer indicates to libata via this hook that hot-unplug or
7296 * module unload event has occurred. Detach all ports. Resource
7297 * release is handled via devres.
1da177e4
LT
7298 *
7299 * LOCKING:
7300 * Inherited from PCI layer (may sleep).
7301 */
f0d36efd 7302void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7303{
2855568b 7304 struct device *dev = &pdev->dev;
cca3974e 7305 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7306
b878ca5d 7307 ata_host_detach(host);
1da177e4
LT
7308}
7309
7310/* move to PCI subsystem */
057ace5e 7311int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7312{
7313 unsigned long tmp = 0;
7314
7315 switch (bits->width) {
7316 case 1: {
7317 u8 tmp8 = 0;
7318 pci_read_config_byte(pdev, bits->reg, &tmp8);
7319 tmp = tmp8;
7320 break;
7321 }
7322 case 2: {
7323 u16 tmp16 = 0;
7324 pci_read_config_word(pdev, bits->reg, &tmp16);
7325 tmp = tmp16;
7326 break;
7327 }
7328 case 4: {
7329 u32 tmp32 = 0;
7330 pci_read_config_dword(pdev, bits->reg, &tmp32);
7331 tmp = tmp32;
7332 break;
7333 }
7334
7335 default:
7336 return -EINVAL;
7337 }
7338
7339 tmp &= bits->mask;
7340
7341 return (tmp == bits->val) ? 1 : 0;
7342}
9b847548 7343
6ffa01d8 7344#ifdef CONFIG_PM
/* PCI-level suspend for an ATA host: save config space, disable the
 * device and, for a real suspend event, drop it into D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	/* save config space so ata_pci_device_do_resume() can restore it */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only a true suspend powers the device down */
	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
7353
553c4aa6 7354int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7355{
553c4aa6
TH
7356 int rc;
7357
9b847548
JA
7358 pci_set_power_state(pdev, PCI_D0);
7359 pci_restore_state(pdev);
553c4aa6 7360
b878ca5d 7361 rc = pcim_enable_device(pdev);
553c4aa6
TH
7362 if (rc) {
7363 dev_printk(KERN_ERR, &pdev->dev,
7364 "failed to enable device after resume (%d)\n", rc);
7365 return rc;
7366 }
7367
9b847548 7368 pci_set_master(pdev);
553c4aa6 7369 return 0;
500530f6
TH
7370}
7371
3c5100c1 7372int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7373{
cca3974e 7374 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7375 int rc = 0;
7376
cca3974e 7377 rc = ata_host_suspend(host, mesg);
500530f6
TH
7378 if (rc)
7379 return rc;
7380
3c5100c1 7381 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7382
7383 return 0;
7384}
7385
7386int ata_pci_device_resume(struct pci_dev *pdev)
7387{
cca3974e 7388 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7389 int rc;
500530f6 7390
553c4aa6
TH
7391 rc = ata_pci_device_do_resume(pdev);
7392 if (rc == 0)
7393 ata_host_resume(host);
7394 return rc;
9b847548 7395}
6ffa01d8
TH
7396#endif /* CONFIG_PM */
7397
1da177e4
LT
7398#endif /* CONFIG_PCI */
7399
7400
1da177e4
LT
7401static int __init ata_init(void)
7402{
a8601e5f 7403 ata_probe_timeout *= HZ;
1da177e4
LT
7404 ata_wq = create_workqueue("ata");
7405 if (!ata_wq)
7406 return -ENOMEM;
7407
453b07ac
TH
7408 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7409 if (!ata_aux_wq) {
7410 destroy_workqueue(ata_wq);
7411 return -ENOMEM;
7412 }
7413
1da177e4
LT
7414 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7415 return 0;
7416}
7417
/* module exit: tear down both workqueues created in ata_init() */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

/* initialize early (subsys level) so host drivers loading later can probe */
subsys_initcall(ata_init);
module_exit(ata_exit);
7426
67846b30 7427static unsigned long ratelimit_time;
34af946a 7428static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7429
7430int ata_ratelimit(void)
7431{
7432 int rc;
7433 unsigned long flags;
7434
7435 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7436
7437 if (time_after(jiffies, ratelimit_time)) {
7438 rc = 1;
7439 ratelimit_time = jiffies + (HZ/5);
7440 } else
7441 rc = 0;
7442
7443 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7444
7445 return rc;
7446}
7447
c22daff4
TH
7448/**
7449 * ata_wait_register - wait until register value changes
7450 * @reg: IO-mapped register
7451 * @mask: Mask to apply to read register value
7452 * @val: Wait condition
7453 * @interval_msec: polling interval in milliseconds
7454 * @timeout_msec: timeout in milliseconds
7455 *
7456 * Waiting for some bits of register to change is a common
7457 * operation for ATA controllers. This function reads 32bit LE
7458 * IO-mapped register @reg and tests for the following condition.
7459 *
7460 * (*@reg & mask) != val
7461 *
7462 * If the condition is met, it returns; otherwise, the process is
7463 * repeated after @interval_msec until timeout.
7464 *
7465 * LOCKING:
7466 * Kernel thread context (may sleep)
7467 *
7468 * RETURNS:
7469 * The final register value.
7470 */
7471u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7472 unsigned long interval_msec,
7473 unsigned long timeout_msec)
7474{
7475 unsigned long timeout;
7476 u32 tmp;
7477
7478 tmp = ioread32(reg);
7479
7480 /* Calculate timeout _after_ the first read to make sure
7481 * preceding writes reach the controller before starting to
7482 * eat away the timeout.
7483 */
7484 timeout = jiffies + (timeout_msec * HZ) / 1000;
7485
7486 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7487 msleep(interval_msec);
7488 tmp = ioread32(reg);
7489 }
7490
7491 return tmp;
7492}
7493
dd5b06c4
TH
/*
 * Dummy port_ops
 */
/* no-op stub for void (struct ata_port *) hooks */
static void ata_dummy_noret(struct ata_port *ap) { }
/* always-succeed stub for int (struct ata_port *) hooks */
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
/* no-op stub for void (struct ata_queued_cmd *) hooks */
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report device-ready so status polls terminate immediately */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* fail every issued command; a dummy port has no hardware behind it */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7510
/* port_ops for ports with no device behind them; every hook is a
 * benign stub so the rest of libata can treat dummy ports uniformly.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
7525
/* canned port_info LLDs can point at to mark a slot as a dummy port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
7529
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
/* SATA link debounce/delay timing tables */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
/* dummy-port plumbing */
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
/* host allocation / registration lifecycle */
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
/* command setup and completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
/* taskfile access */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
/* SATA link management and resets */
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
/* SCR register access */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
/* IDENTIFY data helpers */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer-mode timing */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* port multiplier support */
EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

/* error handling */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

/* cable type detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 3.079812 seconds and 5 git commands to generate.