[PATCH] libata: ACPI and _GTF support

drivers/ata/libata-core.c
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.10"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int noacpi;
module_param(noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

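/*
 * Illustrative usage sketch, not from the original source: building a
 * Register - Host to Device FIS for an IDENTIFY DEVICE command.  The
 * taskfile setup is hypothetical; ata_tf_init() and ATA_CMD_ID_ATA are
 * used elsewhere in this file.
 *
 *	struct ata_taskfile tf;
 *	u8 fis[20];
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	ata_tf_to_fis(&tf, fis, 0);
 *
 * fis[] then holds the 20-byte FIS with fis[0] == 0x27 and the Command
 * bit set in fis[1].
 */
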
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

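/*
 * Illustrative note, not from the original source: ata_rw_cmds[] is
 * indexed as (base + fua + lba48 + write), where base is 0 for PIO
 * multi, 8 for plain PIO and 16 for DMA; FUA adds 4, LBA48 adds 2 and
 * write adds 1.  For example, an LBA48 FUA DMA write selects
 * ata_rw_cmds[16 + 4 + 2 + 1] == ATA_CMD_WRITE_FUA_EXT, while a zero
 * table entry means the combination is unsupported and the function
 * returns -1.
 */
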
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

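/*
 * Worked example, not from the original source: for a CHS taskfile
 * addressing cyl 2, head 3, sect 4 on a device with 16 heads and 63
 * sectors per track, the computation above yields
 * block = (2 * 16 + 3) * 63 + 4 = 2209.
 */
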
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

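/*
 * Illustrative sketch, not from the original source: filling a taskfile
 * for a 16-sector FUA write at LBA 0x12345678.  "dev" and "tag" are
 * hypothetical; on an NCQ-capable device this path selects
 * ATA_CMD_FPDMA_WRITE and encodes the tag in the sector count register
 * (tag << 3).
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
 *			    ATA_TFLAG_WRITE | ATA_TFLAG_FUA, tag))
 *		return -EIO;	(request too large or invalid for dev)
 */
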
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

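/*
 * Illustrative note, not from the original source: the three masks live
 * in disjoint bitfields of one unsigned int, so packing and unpacking
 * are pure shift-and-mask operations and round-trip exactly:
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(pio, mwdma, udma);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * recovers the original values, provided each fits its field width as
 * defined by the ATA_BITS_* constants in <linux/ata.h>.
 */
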
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

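/*
 * Worked example, not from the original source: if the highest set bit
 * of an xfer_mask is bit (ATA_SHIFT_PIO + 4), it falls inside the PIO
 * entry of ata_xfer_tbl[], so ata_xfer_mask2mode() returns
 * XFER_PIO_0 + 4 == XFER_PIO_4.  Conversely,
 * ata_xfer_mode2mask(XFER_PIO_4) maps back to the single-bit mask
 * 1 << (ATA_SHIFT_PIO + 4).
 */
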
/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		/* bumping the class marks the device unsupported
		 * (e.g. ATA_DEV_ATA -> ATA_DEV_ATA_UNSUP) */
		dev->class++;
	}
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

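/*
 * Illustrative usage, not from the original source: extracting the
 * product (model) string from IDENTIFY data, as ata_dev_configure()
 * below does for modelbuf.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
 *
 * sizeof(model) is odd (maximum even string length plus one for the
 * NUL), which is exactly what the WARN_ON above enforces.
 */
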
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function intentionally performs no action.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes, that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

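/*
 * Worked example, not from the original source: a modern drive sets
 * bit 1 of word 53 and reports 0x0003 in word 64 (PIO3 and PIO4
 * supported).  The code above then computes
 * pio_mask = (0x03 << 3) | 0x7 = 0x1f, i.e. PIO modes 0-4, before
 * packing it together with the MWDMA and UDMA masks.
 */
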
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that
 * only one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command that consists only of the opcode
 * @cmd itself, without filling any other registers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

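/*
 * Illustrative usage, not from the original source: issuing a
 * parameterless, non-data command such as ATA_CMD_FLUSH through the
 * helper above.
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		...inspect the AC_ERR_* bits...
 */
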
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY.  Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

/* SATA cable reported but the device itself is not native SATA:
 * the device sits behind a PATA-SATA bridge, so bridge limits apply.
 */
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
					       ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u: %s %s\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u, CHS %u/%u/%u\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, dev->cylinders,
					       dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know.  We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined.  Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}

1870/**
0cba632b
JG
1871 * ata_port_probe - Mark port as enabled
1872 * @ap: Port for which we indicate enablement
1da177e4 1873 *
0cba632b
JG
1874 * Modify @ap data structure such that the system
1875 * thinks that the entire port is enabled.
1876 *
cca3974e 1877 * LOCKING: host lock, or some other form of
0cba632b 1878 * serialization.
1da177e4
LT
1879 */
1880
1881void ata_port_probe(struct ata_port *ap)
1882{
198e0fed 1883 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1884}
1885
3be680b7
TH
1886/**
1887 * sata_print_link_status - Print SATA link status
1888 * @ap: SATA port to printk link status about
1889 *
1890 * This function prints link speed and status of a SATA link.
1891 *
1892 * LOCKING:
1893 * None.
1894 */
1895static void sata_print_link_status(struct ata_port *ap)
1896{
6d5f9732 1897 u32 sstatus, scontrol, tmp;
3be680b7 1898
81952c54 1899 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1900 return;
81952c54 1901 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1902
81952c54 1903 if (ata_port_online(ap)) {
3be680b7 1904 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1905 ata_port_printk(ap, KERN_INFO,
1906 "SATA link up %s (SStatus %X SControl %X)\n",
1907 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1908 } else {
f15a1daf
TH
1909 ata_port_printk(ap, KERN_INFO,
1910 "SATA link down (SStatus %X SControl %X)\n",
1911 sstatus, scontrol);
3be680b7
TH
1912 }
1913}
1914
1da177e4 1915/**
780a87f7
JG
1916 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1917 * @ap: SATA port associated with target SATA PHY.
1da177e4 1918 *
780a87f7
JG
1919 * This function issues commands to standard SATA Sxxx
1920 * PHY registers, to wake up the phy (and device), and
1921 * clear any reset condition.
1da177e4
LT
1922 *
1923 * LOCKING:
0cba632b 1924 * PCI/etc. bus probe sem.
1da177e4
LT
1925 *
1926 */
1927void __sata_phy_reset(struct ata_port *ap)
1928{
1929 u32 sstatus;
1930 unsigned long timeout = jiffies + (HZ * 5);
1931
1932 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1933 /* issue phy wake/reset */
81952c54 1934 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1935 /* Couldn't find anything in SATA I/II specs, but
1936 * AHCI-1.1 10.4.2 says at least 1 ms. */
1937 mdelay(1);
1da177e4 1938 }
81952c54
TH
1939 /* phy wake/clear reset */
1940 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1941
1942 /* wait for phy to become ready, if necessary */
1943 do {
1944 msleep(200);
81952c54 1945 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1946 if ((sstatus & 0xf) != 1)
1947 break;
1948 } while (time_before(jiffies, timeout));
1949
3be680b7
TH
1950 /* print link status */
1951 sata_print_link_status(ap);
656563e3 1952
3be680b7 1953 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 1954 if (!ata_port_offline(ap))
1da177e4 1955 ata_port_probe(ap);
3be680b7 1956 else
1da177e4 1957 ata_port_disable(ap);
1da177e4 1958
198e0fed 1959 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1960 return;
1961
1962 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1963 ata_port_disable(ap);
1964 return;
1965 }
1966
1967 ap->cbl = ATA_CBL_SATA;
1968}
1969
1970/**
780a87f7
JG
1971 * sata_phy_reset - Reset SATA bus.
1972 * @ap: SATA port associated with target SATA PHY.
1da177e4 1973 *
780a87f7
JG
1974 * This function resets the SATA bus, and then probes
1975 * the bus for devices.
1da177e4
LT
1976 *
1977 * LOCKING:
0cba632b 1978 * PCI/etc. bus probe sem.
1da177e4
LT
1979 *
1980 */
1981void sata_phy_reset(struct ata_port *ap)
1982{
1983 __sata_phy_reset(ap);
198e0fed 1984 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1985 return;
1986 ata_bus_reset(ap);
1987}
1988
ebdfca6e
AC
1989/**
1990 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1991 * @adev: device
1992 *
1993 * Obtain the other device on the same cable; if none is
1994 * present, NULL is returned.
1995 */
2e9edbf8 1996
3373efd8 1997struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1998{
3373efd8 1999 struct ata_port *ap = adev->ap;
ebdfca6e 2000 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2001 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2002 return NULL;
2003 return pair;
2004}
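/*
 * Illustrative sketch (not part of the original file): a PATA LLDD's
 * ->mode_filter() hook might use ata_dev_pair() to restrict modes that
 * are unsafe when two devices share one cable. example_mode_filter is
 * a hypothetical name; the mask follows the CFA shared-cable rule
 * applied in ata_dev_xfermask() further below.
 *
 *	static unsigned long example_mode_filter(struct ata_port *ap,
 *						 struct ata_device *adev,
 *						 unsigned long xfer_mask)
 *	{
 *		if (ata_dev_pair(adev))		// companion device present
 *			xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));	// no PIO5/6
 *		return xfer_mask;
 *	}
 */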
2005
1da177e4 2006/**
780a87f7
JG
2007 * ata_port_disable - Disable port.
2008 * @ap: Port to be disabled.
1da177e4 2009 *
780a87f7
JG
2010 * Modify @ap data structure such that the system
2011 * thinks that the entire port is disabled, and should
2012 * never attempt to probe or communicate with devices
2013 * on this port.
2014 *
cca3974e 2015 * LOCKING: host lock, or some other form of
780a87f7 2016 * serialization.
1da177e4
LT
2017 */
2018
2019void ata_port_disable(struct ata_port *ap)
2020{
2021 ap->device[0].class = ATA_DEV_NONE;
2022 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2023 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2024}
2025
1c3fae4d 2026/**
3c567b7d 2027 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2028 * @ap: Port to adjust SATA spd limit for
2029 *
2030 * Adjust SATA spd limit of @ap downward. Note that this
2031 * function only adjusts the limit. The change must be applied
3c567b7d 2032 * using sata_set_spd().
1c3fae4d
TH
2033 *
2034 * LOCKING:
2035 * Inherited from caller.
2036 *
2037 * RETURNS:
2038 * 0 on success, negative errno on failure
2039 */
3c567b7d 2040int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2041{
81952c54
TH
2042 u32 sstatus, spd, mask;
2043 int rc, highbit;
1c3fae4d 2044
81952c54
TH
2045 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2046 if (rc)
2047 return rc;
1c3fae4d
TH
2048
2049 mask = ap->sata_spd_limit;
2050 if (mask <= 1)
2051 return -EINVAL;
2052 highbit = fls(mask) - 1;
2053 mask &= ~(1 << highbit);
2054
81952c54 2055 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2056 if (spd <= 1)
2057 return -EINVAL;
2058 spd--;
2059 mask &= (1 << spd) - 1;
2060 if (!mask)
2061 return -EINVAL;
2062
2063 ap->sata_spd_limit = mask;
2064
f15a1daf
TH
2065 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2066 sata_spd_string(fls(mask)));
1c3fae4d
TH
2067
2068 return 0;
2069}
2070
3c567b7d 2071static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2072{
2073 u32 spd, limit;
2074
2075 if (ap->sata_spd_limit == UINT_MAX)
2076 limit = 0;
2077 else
2078 limit = fls(ap->sata_spd_limit);
2079
2080 spd = (*scontrol >> 4) & 0xf;
2081 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2082
2083 return spd != limit;
2084}
2085
2086/**
3c567b7d 2087 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2088 * @ap: Port in question
2089 *
2090 * Test whether the spd limit in SControl matches
2091 * @ap->sata_spd_limit. This function is used to determine
2092 * whether hardreset is necessary to apply SATA spd
2093 * configuration.
2094 *
2095 * LOCKING:
2096 * Inherited from caller.
2097 *
2098 * RETURNS:
2099 * 1 if SATA spd configuration is needed, 0 otherwise.
2100 */
3c567b7d 2101int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2102{
2103 u32 scontrol;
2104
81952c54 2105 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2106 return 0;
2107
3c567b7d 2108 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2109}
2110
2111/**
3c567b7d 2112 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2113 * @ap: Port to set SATA spd for
2114 *
2115 * Set SATA spd of @ap according to sata_spd_limit.
2116 *
2117 * LOCKING:
2118 * Inherited from caller.
2119 *
2120 * RETURNS:
2121 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2122 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2123 */
3c567b7d 2124int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2125{
2126 u32 scontrol;
81952c54 2127 int rc;
1c3fae4d 2128
81952c54
TH
2129 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2130 return rc;
1c3fae4d 2131
3c567b7d 2132 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2133 return 0;
2134
81952c54
TH
2135 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2136 return rc;
2137
1c3fae4d
TH
2138 return 1;
2139}
2140
452503f9
AC
2141/*
2142 * This mode timing computation functionality is ported over from
2143 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2144 */
2145/*
b352e57d 2146 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2147 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2148 * for UDMA6, which is currently supported only by Maxtor drives.
2149 *
2150 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2151 */
2152
2153static const struct ata_timing ata_timing[] = {
2154
2155 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2156 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2157 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2158 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2159
b352e57d
AC
2160 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2161 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2162 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2163 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2164 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2165
2166/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2167
452503f9
AC
2168 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2169 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2170 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2171
452503f9
AC
2172 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2173 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2174 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2175
b352e57d
AC
2176 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2177 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2178 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2179 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2180
2181 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2182 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2183 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2184
2185/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2186
2187 { 0xFF }
2188};
2189
2190#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2191#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
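/*
 * Worked example (illustrative): ENOUGH() is a ceiling division and EZ()
 * maps an unspecified value of 0 to 0 cycles instead of rounding it up.
 * ata_timing_quantize() below multiplies the nanosecond fields by 1000,
 * so T and UT are clock periods in picoseconds; with a 33 MHz bus clock
 * (T = 30000), a 70 ns active time becomes EZ(70 * 1000, 30000) =
 * (70000 - 1) / 30000 + 1 = 3 clock cycles.
 */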
2192
2193static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2194{
2195 q->setup = EZ(t->setup * 1000, T);
2196 q->act8b = EZ(t->act8b * 1000, T);
2197 q->rec8b = EZ(t->rec8b * 1000, T);
2198 q->cyc8b = EZ(t->cyc8b * 1000, T);
2199 q->active = EZ(t->active * 1000, T);
2200 q->recover = EZ(t->recover * 1000, T);
2201 q->cycle = EZ(t->cycle * 1000, T);
2202 q->udma = EZ(t->udma * 1000, UT);
2203}
2204
2205void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2206 struct ata_timing *m, unsigned int what)
2207{
2208 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2209 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2210 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2211 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2212 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2213 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2214 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2215 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2216}
2217
2218static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2219{
2220 const struct ata_timing *t;
2221
2222 for (t = ata_timing; t->mode != speed; t++)
91190758 2223 if (t->mode == 0xFF)
452503f9 2224 return NULL;
2e9edbf8 2225 return t;
452503f9
AC
2226}
2227
2228int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2229 struct ata_timing *t, int T, int UT)
2230{
2231 const struct ata_timing *s;
2232 struct ata_timing p;
2233
2234 /*
2e9edbf8 2235 * Find the mode.
75b1f2f8 2236 */
452503f9
AC
2237
2238 if (!(s = ata_timing_find_mode(speed)))
2239 return -EINVAL;
2240
75b1f2f8
AL
2241 memcpy(t, s, sizeof(*s));
2242
452503f9
AC
2243 /*
2244 * If the drive is an EIDE drive, it can tell us it needs extended
2245 * PIO/MW_DMA cycle timing.
2246 */
2247
2248 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2249 memset(&p, 0, sizeof(p));
2250 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2251 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2252 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2253 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2254 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2255 }
2256 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2257 }
2258
2259 /*
2260 * Convert the timing to bus clock counts.
2261 */
2262
75b1f2f8 2263 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2264
2265 /*
c893a3ae
RD
2266 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2267 * S.M.A.R.T. and some other commands. We have to ensure that the
2268 * DMA cycle timing is no faster than the fastest PIO timing.
452503f9
AC
2269 */
2270
fd3367af 2271 if (speed > XFER_PIO_6) {
452503f9
AC
2272 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2273 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2274 }
2275
2276 /*
c893a3ae 2277 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2278 */
2279
2280 if (t->act8b + t->rec8b < t->cyc8b) {
2281 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2282 t->rec8b = t->cyc8b - t->act8b;
2283 }
2284
2285 if (t->active + t->recover < t->cycle) {
2286 t->active += (t->cycle - (t->active + t->recover)) / 2;
2287 t->recover = t->cycle - t->active;
2288 }
2289
2290 return 0;
2291}
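/*
 * Illustrative sketch (not part of the original file): a PATA driver's
 * ->set_dmamode() might use ata_timing_compute() like this. The clock
 * periods (30000/15000 ps) and example_write_timing_regs() are
 * hypothetical.
 *
 *	static void example_set_dmamode(struct ata_port *ap,
 *					struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->dma_mode, &t, 30000, 15000))
 *			return;		// mode not in ata_timing[]
 *		// program the quantized clock counts into the chipset
 *		example_write_timing_regs(ap, adev->devno, t.active, t.recover);
 *	}
 */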
2292
cf176e1a
TH
2293/**
2294 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2295 * @dev: Device to adjust xfer masks
2296 * @force_pio0: Force PIO0
2297 *
2298 * Adjust xfer masks of @dev downward. Note that this function
2299 * does not apply the change. Invoking ata_set_mode() afterwards
2300 * will apply the limit.
2301 *
2302 * LOCKING:
2303 * Inherited from caller.
2304 *
2305 * RETURNS:
2306 * 0 on success, negative errno on failure
2307 */
3373efd8 2308int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2309{
2310 unsigned long xfer_mask;
2311 int highbit;
2312
2313 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2314 dev->udma_mask);
2315
2316 if (!xfer_mask)
2317 goto fail;
2318 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2319 if (xfer_mask & ATA_MASK_UDMA)
2320 xfer_mask &= ~ATA_MASK_MWDMA;
2321
2322 highbit = fls(xfer_mask) - 1;
2323 xfer_mask &= ~(1 << highbit);
2324 if (force_pio0)
2325 xfer_mask &= 1 << ATA_SHIFT_PIO;
2326 if (!xfer_mask)
2327 goto fail;
2328
2329 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2330 &dev->udma_mask);
2331
f15a1daf
TH
2332 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2333 ata_mode_string(xfer_mask));
cf176e1a
TH
2334
2335 return 0;
2336
2337 fail:
2338 return -EINVAL;
2339}
2340
3373efd8 2341static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2342{
baa1e78a 2343 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2344 unsigned int err_mask;
2345 int rc;
1da177e4 2346
e8384607 2347 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2348 if (dev->xfer_shift == ATA_SHIFT_PIO)
2349 dev->flags |= ATA_DFLAG_PIO;
2350
3373efd8 2351 err_mask = ata_dev_set_xfermode(dev);
11750a40
A
2352 /* Old CFA may refuse this command, which is just fine */
2353 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2354 err_mask &= ~AC_ERR_DEV;
2355
83206a29 2356 if (err_mask) {
f15a1daf
TH
2357 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2358 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2359 return -EIO;
2360 }
1da177e4 2361
baa1e78a 2362 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2363 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2364 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2365 if (rc)
83206a29 2366 return rc;
48a8a14f 2367
23e71c3d
TH
2368 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2369 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2370
f15a1daf
TH
2371 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2372 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2373 return 0;
1da177e4
LT
2374}
2375
1da177e4
LT
2376/**
2377 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2378 * @ap: port on which timings will be programmed
e82cbdb9 2379 * @r_failed_dev: out parameter for failed device
1da177e4 2380 *
e82cbdb9
TH
2381 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2382 * ata_set_mode() fails, pointer to the failing device is
2383 * returned in @r_failed_dev.
780a87f7 2384 *
1da177e4 2385 * LOCKING:
0cba632b 2386 * PCI/etc. bus probe sem.
e82cbdb9
TH
2387 *
2388 * RETURNS:
2389 * 0 on success, negative errno otherwise
1da177e4 2390 */
1ad8e7f9 2391int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2392{
e8e0619f 2393 struct ata_device *dev;
e82cbdb9 2394 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2395
3adcebb2 2396 /* has private set_mode? */
b229a7b0
A
2397 if (ap->ops->set_mode)
2398 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2399
a6d5a51c
TH
2400 /* step 1: calculate xfer_mask */
2401 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2402 unsigned int pio_mask, dma_mask;
a6d5a51c 2403
e8e0619f
TH
2404 dev = &ap->device[i];
2405
e1211e3f 2406 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2407 continue;
2408
3373efd8 2409 ata_dev_xfermask(dev);
1da177e4 2410
acf356b1
TH
2411 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2412 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2413 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2414 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2415
4f65977d 2416 found = 1;
5444a6f4
AC
2417 if (dev->dma_mode)
2418 used_dma = 1;
a6d5a51c 2419 }
4f65977d 2420 if (!found)
e82cbdb9 2421 goto out;
a6d5a51c
TH
2422
2423 /* step 2: always set host PIO timings */
e8e0619f
TH
2424 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2425 dev = &ap->device[i];
2426 if (!ata_dev_enabled(dev))
2427 continue;
2428
2429 if (!dev->pio_mode) {
f15a1daf 2430 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2431 rc = -EINVAL;
e82cbdb9 2432 goto out;
e8e0619f
TH
2433 }
2434
2435 dev->xfer_mode = dev->pio_mode;
2436 dev->xfer_shift = ATA_SHIFT_PIO;
2437 if (ap->ops->set_piomode)
2438 ap->ops->set_piomode(ap, dev);
2439 }
1da177e4 2440
a6d5a51c 2441 /* step 3: set host DMA timings */
e8e0619f
TH
2442 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2443 dev = &ap->device[i];
2444
2445 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2446 continue;
2447
2448 dev->xfer_mode = dev->dma_mode;
2449 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2450 if (ap->ops->set_dmamode)
2451 ap->ops->set_dmamode(ap, dev);
2452 }
1da177e4
LT
2453
2454 /* step 4: update devices' xfer mode */
83206a29 2455 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2456 dev = &ap->device[i];
1da177e4 2457
18d90deb 2458 /* don't update suspended devices' xfer mode */
02670bf3 2459 if (!ata_dev_ready(dev))
83206a29
TH
2460 continue;
2461
3373efd8 2462 rc = ata_dev_set_mode(dev);
5bbc53f4 2463 if (rc)
e82cbdb9 2464 goto out;
83206a29 2465 }
1da177e4 2466
e8e0619f
TH
2467 /* Record simplex status. If we selected DMA then the other
2468 * host channels are not permitted to do so.
5444a6f4 2469 */
cca3974e
JG
2470 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2471 ap->host->simplex_claimed = 1;
5444a6f4 2472
e8e0619f 2473 /* step 5: chip-specific finalisation */
1da177e4
LT
2474 if (ap->ops->post_set_mode)
2475 ap->ops->post_set_mode(ap);
2476
e82cbdb9
TH
2477 out:
2478 if (rc)
2479 *r_failed_dev = dev;
2480 return rc;
1da177e4
LT
2481}
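/*
 * Illustrative sketch (not part of the original file): ata_bus_probe()
 * above shows the intended calling pattern; on failure, the device
 * returned in @r_failed_dev is geared down before retrying:
 *
 *	struct ata_device *failed_dev;
 *	int rc = ata_set_mode(ap, &failed_dev);
 *
 *	if (rc) {
 *		// lower the failed device's transfer mode, then retry
 *		ata_down_xfermask_limit(failed_dev, 0);
 *	}
 */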
2482
1fdffbce
JG
2483/**
2484 * ata_tf_to_host - issue ATA taskfile to host controller
2485 * @ap: port to which command is being issued
2486 * @tf: ATA taskfile register set
2487 *
2488 * Issues ATA taskfile register set to ATA host controller,
2489 * with proper synchronization with interrupt handler and
2490 * other threads.
2491 *
2492 * LOCKING:
cca3974e 2493 * spin_lock_irqsave(host lock)
1fdffbce
JG
2494 */
2495
2496static inline void ata_tf_to_host(struct ata_port *ap,
2497 const struct ata_taskfile *tf)
2498{
2499 ap->ops->tf_load(ap, tf);
2500 ap->ops->exec_command(ap, tf);
2501}
2502
1da177e4
LT
2503/**
2504 * ata_busy_sleep - sleep until BSY clears, or timeout
2505 * @ap: port containing status register to be polled
2506 * @tmout_pat: impatience timeout
2507 * @tmout: overall timeout
2508 *
780a87f7
JG
2509 * Sleep until ATA Status register bit BSY clears,
2510 * or a timeout occurs.
2511 *
d1adc1bb
TH
2512 * LOCKING:
2513 * Kernel thread context (may sleep).
2514 *
2515 * RETURNS:
2516 * 0 on success, -errno otherwise.
1da177e4 2517 */
d1adc1bb
TH
2518int ata_busy_sleep(struct ata_port *ap,
2519 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2520{
2521 unsigned long timer_start, timeout;
2522 u8 status;
2523
2524 status = ata_busy_wait(ap, ATA_BUSY, 300);
2525 timer_start = jiffies;
2526 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2527 while (status != 0xff && (status & ATA_BUSY) &&
2528 time_before(jiffies, timeout)) {
1da177e4
LT
2529 msleep(50);
2530 status = ata_busy_wait(ap, ATA_BUSY, 3);
2531 }
2532
d1adc1bb 2533 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2534 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2535 "port is slow to respond, please be patient "
2536 "(Status 0x%x)\n", status);
1da177e4
LT
2537
2538 timeout = timer_start + tmout;
d1adc1bb
TH
2539 while (status != 0xff && (status & ATA_BUSY) &&
2540 time_before(jiffies, timeout)) {
1da177e4
LT
2541 msleep(50);
2542 status = ata_chk_status(ap);
2543 }
2544
d1adc1bb
TH
2545 if (status == 0xff)
2546 return -ENODEV;
2547
1da177e4 2548 if (status & ATA_BUSY) {
f15a1daf 2549 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2550 "(%lu secs, Status 0x%x)\n",
2551 tmout / HZ, status);
d1adc1bb 2552 return -EBUSY;
1da177e4
LT
2553 }
2554
2555 return 0;
2556}
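/*
 * Illustrative usage (mirroring __sata_phy_reset() above): after a bus
 * reset, wait up to ATA_TMOUT_BOOT for BSY to clear, with a "be patient"
 * warning after ATA_TMOUT_BOOT_QUICK:
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 *		ata_port_disable(ap);	// device never became ready
 *		return;
 *	}
 */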
2557
2558static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2559{
2560 struct ata_ioports *ioaddr = &ap->ioaddr;
2561 unsigned int dev0 = devmask & (1 << 0);
2562 unsigned int dev1 = devmask & (1 << 1);
2563 unsigned long timeout;
2564
2565 /* if device 0 was found in ata_devchk, wait for its
2566 * BSY bit to clear
2567 */
2568 if (dev0)
2569 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2570
2571 /* if device 1 was found in ata_devchk, wait for
2572 * register access, then wait for BSY to clear
2573 */
2574 timeout = jiffies + ATA_TMOUT_BOOT;
2575 while (dev1) {
2576 u8 nsect, lbal;
2577
2578 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2579 nsect = ioread8(ioaddr->nsect_addr);
2580 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2581 if ((nsect == 1) && (lbal == 1))
2582 break;
2583 if (time_after(jiffies, timeout)) {
2584 dev1 = 0;
2585 break;
2586 }
2587 msleep(50); /* give drive a breather */
2588 }
2589 if (dev1)
2590 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2591
2592 /* is all this really necessary? */
2593 ap->ops->dev_select(ap, 0);
2594 if (dev1)
2595 ap->ops->dev_select(ap, 1);
2596 if (dev0)
2597 ap->ops->dev_select(ap, 0);
2598}
2599
1da177e4
LT
2600static unsigned int ata_bus_softreset(struct ata_port *ap,
2601 unsigned int devmask)
2602{
2603 struct ata_ioports *ioaddr = &ap->ioaddr;
2604
2605 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2606
2607 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2608 iowrite8(ap->ctl, ioaddr->ctl_addr);
2609 udelay(20); /* FIXME: flush */
2610 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2611 udelay(20); /* FIXME: flush */
2612 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2613
2614 /* spec mandates ">= 2ms" before checking status.
2615 * We wait 150ms, because that was the magic delay used for
2616 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2617 * between when the ATA command register is written, and then
2618 * status is checked. Because waiting for "a while" before
2619 * checking status is fine, post SRST, we perform this magic
2620 * delay here as well.
09c7ad79
AC
2621 *
2622 * The old drivers/ide layer uses the 2 ms rule and then waits for ready.
1da177e4
LT
2623 */
2624 msleep(150);
2625
2e9edbf8 2626 /* Before we perform post-reset processing, we want to see if
298a41ca
TH
2627 * the bus shows 0xFF because the odd clown forgets the D7
2628 * pulldown resistor.
2629 */
d1adc1bb
TH
2630 if (ata_check_status(ap) == 0xFF)
2631 return 0;
09c7ad79 2632
1da177e4
LT
2633 ata_bus_post_reset(ap, devmask);
2634
2635 return 0;
2636}
2637
2638/**
2639 * ata_bus_reset - reset host port and associated ATA channel
2640 * @ap: port to reset
2641 *
2642 * This is typically the first time we actually start issuing
2643 * commands to the ATA channel. We wait for BSY to clear, then
2644 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2645 * result. Determine what devices, if any, are on the channel
2646 * by looking at the device 0/1 error register. Look at the signature
2647 * stored in each device's taskfile registers, to determine if
2648 * the device is ATA or ATAPI.
2649 *
2650 * LOCKING:
0cba632b 2651 * PCI/etc. bus probe sem.
cca3974e 2652 * Obtains host lock.
1da177e4
LT
2653 *
2654 * SIDE EFFECTS:
198e0fed 2655 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2656 */
2657
2658void ata_bus_reset(struct ata_port *ap)
2659{
2660 struct ata_ioports *ioaddr = &ap->ioaddr;
2661 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2662 u8 err;
aec5c3c1 2663 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4
LT
2664
2665 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2666
2667 /* determine if device 0/1 are present */
2668 if (ap->flags & ATA_FLAG_SATA_RESET)
2669 dev0 = 1;
2670 else {
2671 dev0 = ata_devchk(ap, 0);
2672 if (slave_possible)
2673 dev1 = ata_devchk(ap, 1);
2674 }
2675
2676 if (dev0)
2677 devmask |= (1 << 0);
2678 if (dev1)
2679 devmask |= (1 << 1);
2680
2681 /* select device 0 again */
2682 ap->ops->dev_select(ap, 0);
2683
2684 /* issue bus reset */
2685 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2686 if (ata_bus_softreset(ap, devmask))
2687 goto err_out;
1da177e4
LT
2688
2689 /*
2690 * determine by signature whether we have ATA or ATAPI devices
2691 */
b4dc7623 2692 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2693 if ((slave_possible) && (err != 0x81))
b4dc7623 2694 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2695
2696 /* re-enable interrupts */
83625006 2697 ap->ops->irq_on(ap);
1da177e4
LT
2698
2699 /* is double-select really necessary? */
2700 if (ap->device[1].class != ATA_DEV_NONE)
2701 ap->ops->dev_select(ap, 1);
2702 if (ap->device[0].class != ATA_DEV_NONE)
2703 ap->ops->dev_select(ap, 0);
2704
2705 /* if no devices were detected, disable this port */
2706 if ((ap->device[0].class == ATA_DEV_NONE) &&
2707 (ap->device[1].class == ATA_DEV_NONE))
2708 goto err_out;
2709
2710 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2711 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2712 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2713 }
2714
2715 DPRINTK("EXIT\n");
2716 return;
2717
2718err_out:
f15a1daf 2719 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2720 ap->ops->port_disable(ap);
2721
2722 DPRINTK("EXIT\n");
2723}
2724
d7bb4cc7
TH
2725/**
2726 * sata_phy_debounce - debounce SATA phy status
2727 * @ap: ATA port to debounce SATA phy status for
2728 * @params: timing parameters { interval, duration, timeout } in msec
2729 *
2730 * Make sure SStatus of @ap reaches a stable state, determined by
2731 * holding the same value where DET is not 1 for @duration, polled
2732 * every @interval, before @timeout. Timeout constrains the
2733 * beginning of the stable state. Because, after hot unplugging,
2734 * DET gets stuck at 1 on some controllers, this function waits
2735 * until timeout and then returns 0 if DET is stable at 1.
2736 *
2737 * LOCKING:
2738 * Kernel thread context (may sleep)
2739 *
2740 * RETURNS:
2741 * 0 on success, -errno on failure.
2742 */
2743int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2744{
d7bb4cc7
TH
2745 unsigned long interval_msec = params[0];
2746 unsigned long duration = params[1] * HZ / 1000;
2747 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2748 unsigned long last_jiffies;
2749 u32 last, cur;
2750 int rc;
2751
2752 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2753 return rc;
2754 cur &= 0xf;
2755
2756 last = cur;
2757 last_jiffies = jiffies;
2758
2759 while (1) {
2760 msleep(interval_msec);
2761 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2762 return rc;
2763 cur &= 0xf;
2764
2765 /* DET stable? */
2766 if (cur == last) {
2767 if (cur == 1 && time_before(jiffies, timeout))
2768 continue;
2769 if (time_after(jiffies, last_jiffies + duration))
2770 return 0;
2771 continue;
2772 }
2773
2774 /* unstable, start over */
2775 last = cur;
2776 last_jiffies = jiffies;
2777
2778 /* check timeout */
2779 if (time_after(jiffies, timeout))
2780 return -EBUSY;
2781 }
2782}
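/*
 * Illustrative usage (as in ata_wait_spinup() below): pick one of the
 * sata_deb_timing_* tables declared near the top of this file depending
 * on how tolerant the caller is of a bouncing link:
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 *	if (rc)
 *		return rc;	// link never stabilized before the timeout
 */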
2783
2784/**
2785 * sata_phy_resume - resume SATA phy
2786 * @ap: ATA port to resume SATA phy for
2787 * @params: timing parameters { interval, duration, timeout } in msec
2788 *
2789 * Resume SATA phy of @ap and debounce it.
2790 *
2791 * LOCKING:
2792 * Kernel thread context (may sleep)
2793 *
2794 * RETURNS:
2795 * 0 on success, -errno on failure.
2796 */
2797int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2798{
2799 u32 scontrol;
81952c54
TH
2800 int rc;
2801
2802 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2803 return rc;
7a7921e8 2804
852ee16a 2805 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2806
2807 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2808 return rc;
7a7921e8 2809
d7bb4cc7
TH
2810 /* Some PHYs react badly if SStatus is pounded immediately
2811 * after resuming. Delay 200ms before debouncing.
2812 */
2813 msleep(200);
7a7921e8 2814
d7bb4cc7 2815 return sata_phy_debounce(ap, params);
7a7921e8
TH
2816}
2817
f5914a46
TH
2818static void ata_wait_spinup(struct ata_port *ap)
2819{
2820 struct ata_eh_context *ehc = &ap->eh_context;
2821 unsigned long end, secs;
2822 int rc;
2823
2824 /* first, debounce phy if SATA */
2825 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2826 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2827
2828 /* if debounced successfully and offline, no need to wait */
2829 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2830 return;
2831 }
2832
2833 /* okay, let's give the drive time to spin up */
2834 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2835 secs = ((end - jiffies) + HZ - 1) / HZ;
2836
2837 if (time_after(jiffies, end))
2838 return;
2839
2840 if (secs > 5)
2841 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2842 "(%lu secs)\n", secs);
2843
2844 schedule_timeout_uninterruptible(end - jiffies);
2845}
2846
2847/**
2848 * ata_std_prereset - prepare for reset
2849 * @ap: ATA port to be reset
2850 *
2851 * @ap is about to be reset. Initialize it.
2852 *
2853 * LOCKING:
2854 * Kernel thread context (may sleep)
2855 *
2856 * RETURNS:
2857 * 0 on success, -errno otherwise.
2858 */
2859int ata_std_prereset(struct ata_port *ap)
2860{
2861 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2862 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2863 int rc;
2864
28324304
TH
2865 /* handle link resume & hotplug spinup */
2866 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2867 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2868 ehc->i.action |= ATA_EH_HARDRESET;
2869
2870 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2871 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2872 ata_wait_spinup(ap);
f5914a46
TH
2873
2874 /* if we're about to do hardreset, nothing more to do */
2875 if (ehc->i.action & ATA_EH_HARDRESET)
2876 return 0;
2877
2878 /* if SATA, resume phy */
2879 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2880 rc = sata_phy_resume(ap, timing);
2881 if (rc && rc != -EOPNOTSUPP) {
2882 /* phy resume failed */
2883 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2884 "link for reset (errno=%d)\n", rc);
2885 return rc;
2886 }
2887 }
2888
2889 /* Wait for !BSY if the controller can wait for the first D2H
2890 * Reg FIS and we don't know that no device is attached.
2891 */
2892 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2893 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2894
2895 return 0;
2896}
2897
c2bd5804
TH
2898/**
2899 * ata_std_softreset - reset host port via ATA SRST
2900 * @ap: port to reset
c2bd5804
TH
2901 * @classes: resulting classes of attached devices
2902 *
52783c5d 2903 * Reset host port using ATA SRST.
c2bd5804
TH
2904 *
2905 * LOCKING:
2906 * Kernel thread context (may sleep)
2907 *
2908 * RETURNS:
2909 * 0 on success, -errno otherwise.
2910 */
2bf2cb26 2911int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
2912{
2913 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2914 unsigned int devmask = 0, err_mask;
2915 u8 err;
2916
2917 DPRINTK("ENTER\n");
2918
81952c54 2919 if (ata_port_offline(ap)) {
3a39746a
TH
2920 classes[0] = ATA_DEV_NONE;
2921 goto out;
2922 }
2923
c2bd5804
TH
2924 /* determine if device 0/1 are present */
2925 if (ata_devchk(ap, 0))
2926 devmask |= (1 << 0);
2927 if (slave_possible && ata_devchk(ap, 1))
2928 devmask |= (1 << 1);
2929
c2bd5804
TH
2930 /* select device 0 again */
2931 ap->ops->dev_select(ap, 0);
2932
2933 /* issue bus reset */
2934 DPRINTK("about to softreset, devmask=%x\n", devmask);
2935 err_mask = ata_bus_softreset(ap, devmask);
2936 if (err_mask) {
f15a1daf
TH
2937 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2938 err_mask);
c2bd5804
TH
2939 return -EIO;
2940 }
2941
2942 /* determine by signature whether we have ATA or ATAPI devices */
2943 classes[0] = ata_dev_try_classify(ap, 0, &err);
2944 if (slave_possible && err != 0x81)
2945 classes[1] = ata_dev_try_classify(ap, 1, &err);
2946
3a39746a 2947 out:
c2bd5804
TH
2948 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2949 return 0;
2950}
2951
2952/**
b6103f6d 2953 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 2954 * @ap: port to reset
b6103f6d 2955 * @timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
2956 *
2957 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
2958 *
2959 * LOCKING:
2960 * Kernel thread context (may sleep)
2961 *
2962 * RETURNS:
2963 * 0 on success, -errno otherwise.
2964 */
b6103f6d 2965int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 2966{
852ee16a 2967 u32 scontrol;
81952c54 2968 int rc;
852ee16a 2969
c2bd5804
TH
2970 DPRINTK("ENTER\n");
2971
3c567b7d 2972 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
2973 /* SATA spec says nothing about how to reconfigure
2974 * spd. To be on the safe side, turn off phy during
2975 * reconfiguration. This works for at least ICH7 AHCI
2976 * and Sil3124.
2977 */
81952c54 2978 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2979 goto out;
81952c54 2980
a34b6fc0 2981 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
2982
2983 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 2984 goto out;
1c3fae4d 2985
3c567b7d 2986 sata_set_spd(ap);
1c3fae4d
TH
2987 }
2988
2989 /* issue phy wake/reset */
81952c54 2990 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2991 goto out;
81952c54 2992
852ee16a 2993 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
2994
2995 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 2996 goto out;
c2bd5804 2997
1c3fae4d 2998 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
2999 * 10.4.2 says at least 1 ms.
3000 */
3001 msleep(1);
3002
1c3fae4d 3003 /* bring phy back */
b6103f6d
TH
3004 rc = sata_phy_resume(ap, timing);
3005 out:
3006 DPRINTK("EXIT, rc=%d\n", rc);
3007 return rc;
3008}
3009
3010/**
3011 * sata_std_hardreset - reset host port via SATA phy reset
3012 * @ap: port to reset
3013 * @class: resulting class of attached device
3014 *
3015 * SATA phy-reset host port using DET bits of SControl register,
3016 * wait for !BSY and classify the attached device.
3017 *
3018 * LOCKING:
3019 * Kernel thread context (may sleep)
3020 *
3021 * RETURNS:
3022 * 0 on success, -errno otherwise.
3023 */
3024int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3025{
3026 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3027 int rc;
3028
3029 DPRINTK("ENTER\n");
3030
3031 /* do hardreset */
3032 rc = sata_port_hardreset(ap, timing);
3033 if (rc) {
3034 ata_port_printk(ap, KERN_ERR,
3035 "COMRESET failed (errno=%d)\n", rc);
3036 return rc;
3037 }
c2bd5804 3038
c2bd5804 3039 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3040 if (ata_port_offline(ap)) {
c2bd5804
TH
3041 *class = ATA_DEV_NONE;
3042 DPRINTK("EXIT, link offline\n");
3043 return 0;
3044 }
3045
34fee227
TH
3046 /* wait a while before checking status, see SRST for more info */
3047 msleep(150);
3048
c2bd5804 3049 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3050 ata_port_printk(ap, KERN_ERR,
3051 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3052 return -EIO;
3053 }
3054
3a39746a
TH
3055 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3056
c2bd5804
TH
3057 *class = ata_dev_try_classify(ap, 0, NULL);
3058
3059 DPRINTK("EXIT, class=%u\n", *class);
3060 return 0;
3061}
3062
3063/**
3064 * ata_std_postreset - standard postreset callback
3065 * @ap: the target ata_port
3066 * @classes: classes of attached devices
3067 *
3068 * This function is invoked after a successful reset. Note that
3069 * the device might have been reset more than once using
3070 * different reset methods before postreset is invoked.
c2bd5804 3071 *
c2bd5804
TH
3072 * LOCKING:
3073 * Kernel thread context (may sleep)
3074 */
3075void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3076{
dc2b3515
TH
3077 u32 serror;
3078
c2bd5804
TH
3079 DPRINTK("ENTER\n");
3080
c2bd5804 3081 /* print link status */
81952c54 3082 sata_print_link_status(ap);
c2bd5804 3083
dc2b3515
TH
3084 /* clear SError */
3085 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3086 sata_scr_write(ap, SCR_ERROR, serror);
3087
3a39746a 3088 /* re-enable interrupts */
83625006
AI
3089 if (!ap->ops->error_handler)
3090 ap->ops->irq_on(ap);
c2bd5804
TH
3091
3092 /* is double-select really necessary? */
3093 if (classes[0] != ATA_DEV_NONE)
3094 ap->ops->dev_select(ap, 1);
3095 if (classes[1] != ATA_DEV_NONE)
3096 ap->ops->dev_select(ap, 0);
3097
3a39746a
TH
3098 /* bail out if no device is present */
3099 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3100 DPRINTK("EXIT, no device\n");
3101 return;
3102 }
3103
3104 /* set up device control */
0d5ff566
TH
3105 if (ap->ioaddr.ctl_addr)
3106 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3107
3108 DPRINTK("EXIT\n");
3109}
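/*
 * Illustrative sketch (not part of the original file): the std reset
 * callbacks above are normally chained by an LLDD's ->error_handler();
 * the exact helper varies by driver, but the general shape is:
 *
 *	static void example_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */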
3110
623a3128
TH
3111/**
3112 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3113 * @dev: device to compare against
3114 * @new_class: class of the new device
3115 * @new_id: IDENTIFY page of the new device
3116 *
3117 * Compare @new_class and @new_id against @dev and determine
3118 * whether @dev is the device indicated by @new_class and
3119 * @new_id.
3120 *
3121 * LOCKING:
3122 * None.
3123 *
3124 * RETURNS:
3125 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3126 */
3373efd8
TH
3127static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3128 const u16 *new_id)
623a3128
TH
3129{
3130 const u16 *old_id = dev->id;
a0cf733b
TH
3131 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3132 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3133 u64 new_n_sectors;
3134
3135 if (dev->class != new_class) {
f15a1daf
TH
3136 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3137 dev->class, new_class);
623a3128
TH
3138 return 0;
3139 }
3140
a0cf733b
TH
3141 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3142 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3143 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3144 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3145 new_n_sectors = ata_id_n_sectors(new_id);
3146
3147 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3148 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3149 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3150 return 0;
3151 }
3152
3153 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3154 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3155 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3156 return 0;
3157 }
3158
3159 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3160 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3161 "%llu != %llu\n",
3162 (unsigned long long)dev->n_sectors,
3163 (unsigned long long)new_n_sectors);
623a3128
TH
3164 return 0;
3165 }
3166
3167 return 1;
3168}
3169
3170/**
3171 * ata_dev_revalidate - Revalidate ATA device
623a3128 3172 * @dev: device to revalidate
bff04647 3173 * @readid_flags: read ID flags
623a3128
TH
3174 *
3175 * Re-read IDENTIFY page and make sure @dev is still attached to
3176 * the port.
3177 *
3178 * LOCKING:
3179 * Kernel thread context (may sleep)
3180 *
3181 * RETURNS:
3182 * 0 on success, negative errno otherwise
3183 */
bff04647 3184int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3185{
5eb45c02 3186 unsigned int class = dev->class;
f15a1daf 3187 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3188 int rc;
3189
5eb45c02
TH
3190 if (!ata_dev_enabled(dev)) {
3191 rc = -ENODEV;
3192 goto fail;
3193 }
623a3128 3194
fe635c7e 3195 /* read ID data */
bff04647 3196 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3197 if (rc)
3198 goto fail;
3199
3200 /* is the device still there? */
3373efd8 3201 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3202 rc = -ENODEV;
3203 goto fail;
3204 }
3205
fe635c7e 3206 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3207
3208 /* configure device according to the new ID */
efdaedc4 3209 rc = ata_dev_configure(dev);
5eb45c02
TH
3210 if (rc == 0)
3211 return 0;
623a3128
TH
3212
3213 fail:
f15a1daf 3214 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3215 return rc;
3216}
3217
6919a0a6
AC
3218struct ata_blacklist_entry {
3219 const char *model_num;
3220 const char *model_rev;
3221 unsigned long horkage;
3222};
3223
3224static const struct ata_blacklist_entry ata_device_blacklist [] = {
3225 /* Devices with DMA related problems under Linux */
3226 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3227 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3228 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3229 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3230 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3231 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3232 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3233 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3234 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3235 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3236 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3237 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3238 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3239 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3240 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3241 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3242 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3243 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3244 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3245 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3246 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3247 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3248 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3249 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3250 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3251 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3252 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3253 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3254 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3255 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3256
3257 /* Devices we expect to fail diagnostics */
3258
3259 /* Devices where NCQ should be avoided */
3260 /* NCQ is slow */
3261 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3262
3263 /* Devices with NCQ limits */
3264
3265 /* End Marker */
3266 { }
1da177e4 3267};
2e9edbf8 3268
6919a0a6 3269unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3270{
8bfa79fc
TH
3271 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3272 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3273 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3274
8bfa79fc
TH
3275 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3276 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3277
6919a0a6 3278 while (ad->model_num) {
8bfa79fc 3279 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3280 if (ad->model_rev == NULL)
3281 return ad->horkage;
8bfa79fc 3282 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3283 return ad->horkage;
f4b15fef 3284 }
6919a0a6 3285 ad++;
f4b15fef 3286 }
1da177e4
LT
3287 return 0;
3288}
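/*
 * Illustrative example (hypothetical device, not part of the original
 * table): an entry limiting the horkage to one firmware revision would
 * look like this; a NULL model_rev matches every revision.
 *
 *	{ "ExampleDisk 123", "1.00", ATA_HORKAGE_NONCQ },
 */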
3289
6919a0a6
AC
3290static int ata_dma_blacklisted(const struct ata_device *dev)
3291{
3292 /* We don't support polling DMA.
3293 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
3294 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3295 */
3296 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3297 (dev->flags & ATA_DFLAG_CDB_INTR))
3298 return 1;
3299 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3300}
3301
a6d5a51c
TH
3302/**
3303 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3304 * @dev: Device to compute xfermask for
3305 *
acf356b1
TH
3306 * Compute supported xfermask of @dev and store it in
3307 * dev->*_mask. This function is responsible for applying all
3308 * known limits including host controller limits, device
3309 * blacklist, etc...
a6d5a51c
TH
3310 *
3311 * LOCKING:
3312 * None.
a6d5a51c 3313 */
3373efd8 3314static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3315{
3373efd8 3316 struct ata_port *ap = dev->ap;
cca3974e 3317 struct ata_host *host = ap->host;
a6d5a51c 3318 unsigned long xfer_mask;
1da177e4 3319
37deecb5 3320 /* controller modes available */
565083e1
TH
3321 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3322 ap->mwdma_mask, ap->udma_mask);
3323
3324 /* Apply cable rule here. Don't apply it early because when
3325 * we handle hot plug the cable type can itself change.
3326 */
3327 if (ap->cbl == ATA_CBL_PATA40)
3328 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
fc085150
AC
3329 /* Apply the drive-side cable rule. Cables the host reports as
3330 * unknown or 80-wire are checked on the drive side as well. Cases
3331 * where a 40-wire cable is known to be safe for 80-wire modes are not checked here.
3332 */
3333 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3334 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3335
1da177e4 3336
37deecb5
TH
3337 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3338 dev->mwdma_mask, dev->udma_mask);
3339 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3340
b352e57d
AC
3341 /*
3342 * CFA Advanced TrueIDE timings are not allowed on a shared
3343 * cable
3344 */
3345 if (ata_dev_pair(dev)) {
3346 /* No PIO5 or PIO6 */
3347 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3348 /* No MWDMA3 or MWDMA 4 */
3349 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3350 }
3351
37deecb5
TH
3352 if (ata_dma_blacklisted(dev)) {
3353 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3354 ata_dev_printk(dev, KERN_WARNING,
3355 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3356 }
a6d5a51c 3357
cca3974e 3358 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
37deecb5
TH
3359 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3360 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3361 "other device, disabling DMA\n");
5444a6f4 3362 }
565083e1 3363
5444a6f4
AC
3364 if (ap->ops->mode_filter)
3365 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3366
565083e1
TH
3367 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3368 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3369}
3370
1da177e4
LT
3371/**
3372 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3373 * @dev: Device to which command will be sent
3374 *
780a87f7
JG
3375 * Issue SET FEATURES - XFER MODE command to device @dev
3376 * on port @ap.
3377 *
1da177e4 3378 * LOCKING:
0cba632b 3379 * PCI/etc. bus probe sem.
83206a29
TH
3380 *
3381 * RETURNS:
3382 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3383 */
3384
3373efd8 3385static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3386{
a0123703 3387 struct ata_taskfile tf;
83206a29 3388 unsigned int err_mask;
1da177e4
LT
3389
3390 /* set up set-features taskfile */
3391 DPRINTK("set features - xfer mode\n");
3392
3373efd8 3393 ata_tf_init(dev, &tf);
a0123703
TH
3394 tf.command = ATA_CMD_SET_FEATURES;
3395 tf.feature = SETFEATURES_XFER;
3396 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3397 tf.protocol = ATA_PROT_NODATA;
3398 tf.nsect = dev->xfer_mode;
1da177e4 3399
3373efd8 3400 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3401
83206a29
TH
3402 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3403 return err_mask;
1da177e4
LT
3404}
3405
8bf62ece
AL
3406/**
3407 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3408 * @dev: Device to which command will be sent
e2a7f77a
RD
3409 * @heads: Number of heads (taskfile parameter)
3410 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3411 *
3412 * LOCKING:
6aff8f1f
TH
3413 * Kernel thread context (may sleep)
3414 *
3415 * RETURNS:
3416 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3417 */
3373efd8
TH
3418static unsigned int ata_dev_init_params(struct ata_device *dev,
3419 u16 heads, u16 sectors)
8bf62ece 3420{
a0123703 3421 struct ata_taskfile tf;
6aff8f1f 3422 unsigned int err_mask;
8bf62ece
AL
3423
3424 /* Number of sectors per track 1-255. Number of heads 1-16 */
3425 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3426 return AC_ERR_INVALID;
8bf62ece
AL
3427
3428 /* set up init dev params taskfile */
3429 DPRINTK("init dev params \n");
3430
3373efd8 3431 ata_tf_init(dev, &tf);
a0123703
TH
3432 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3433 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3434 tf.protocol = ATA_PROT_NODATA;
3435 tf.nsect = sectors;
3436 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3437
3373efd8 3438 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3439
6aff8f1f
TH
3440 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3441 return err_mask;
8bf62ece
AL
3442}
3443
1da177e4 3444/**
0cba632b
JG
3445 * ata_sg_clean - Unmap DMA memory associated with command
3446 * @qc: Command containing DMA memory to be released
3447 *
3448 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3449 *
3450 * LOCKING:
cca3974e 3451 * spin_lock_irqsave(host lock)
1da177e4 3452 */
70e6ad0c 3453void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3454{
3455 struct ata_port *ap = qc->ap;
cedc9a47 3456 struct scatterlist *sg = qc->__sg;
1da177e4 3457 int dir = qc->dma_dir;
cedc9a47 3458 void *pad_buf = NULL;
1da177e4 3459
a4631474
TH
3460 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3461 WARN_ON(sg == NULL);
1da177e4
LT
3462
3463 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3464 WARN_ON(qc->n_elem > 1);
1da177e4 3465
2c13b7ce 3466 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3467
cedc9a47
JG
3468 /* if we padded the buffer out to a 32-bit boundary, and the data
3469 * xfer direction is from-device, we must copy from the
3470 * pad buffer back into the supplied buffer
3471 */
3472 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3473 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3474
3475 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3476 if (qc->n_elem)
2f1f610b 3477 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3478 /* restore last sg */
3479 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3480 if (pad_buf) {
3481 struct scatterlist *psg = &qc->pad_sgent;
3482 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3483 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3484 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3485 }
3486 } else {
2e242fa9 3487 if (qc->n_elem)
2f1f610b 3488 dma_unmap_single(ap->dev,
e1410f2d
JG
3489 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3490 dir);
cedc9a47
JG
3491 /* restore sg */
3492 sg->length += qc->pad_len;
3493 if (pad_buf)
3494 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3495 pad_buf, qc->pad_len);
3496 }
1da177e4
LT
3497
3498 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3499 qc->__sg = NULL;
1da177e4
LT
3500}
3501
3502/**
3503 * ata_fill_sg - Fill PCI IDE PRD table
3504 * @qc: Metadata associated with taskfile to be transferred
3505 *
780a87f7
JG
3506 * Fill PCI IDE PRD (scatter-gather) table with segments
3507 * associated with the current disk command.
3508 *
1da177e4 3509 * LOCKING:
cca3974e 3510 * spin_lock_irqsave(host lock)
1da177e4
LT
3511 *
3512 */
3513static void ata_fill_sg(struct ata_queued_cmd *qc)
3514{
1da177e4 3515 struct ata_port *ap = qc->ap;
cedc9a47
JG
3516 struct scatterlist *sg;
3517 unsigned int idx;
1da177e4 3518
a4631474 3519 WARN_ON(qc->__sg == NULL);
f131883e 3520 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3521
3522 idx = 0;
cedc9a47 3523 ata_for_each_sg(sg, qc) {
1da177e4
LT
3524 u32 addr, offset;
3525 u32 sg_len, len;
3526
3527 /* determine if physical DMA addr spans 64K boundary.
3528 * Note h/w doesn't support 64-bit, so we unconditionally
3529 * truncate dma_addr_t to u32.
3530 */
3531 addr = (u32) sg_dma_address(sg);
3532 sg_len = sg_dma_len(sg);
3533
3534 while (sg_len) {
3535 offset = addr & 0xffff;
3536 len = sg_len;
3537 if ((offset + sg_len) > 0x10000)
3538 len = 0x10000 - offset;
3539
3540 ap->prd[idx].addr = cpu_to_le32(addr);
3541 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3542 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3543
3544 idx++;
3545 sg_len -= len;
3546 addr += len;
3547 }
3548 }
3549
3550 if (idx)
3551 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3552}
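/*
 * Worked example of the 64K boundary split above (illustrative): an
 * 8192-byte segment at bus address 0x1f000 sits at offset 0xf000 within
 * its 64K page, so it is emitted as two PRD entries: 4096 bytes at
 * 0x1f000 and the remaining 4096 bytes at 0x20000.
 */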
3553/**
3554 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3555 * @qc: Metadata associated with taskfile to check
3556 *
780a87f7
JG
3557 * Allow low-level driver to filter ATA PACKET commands, returning
3558 * a status indicating whether or not it is OK to use DMA for the
3559 * supplied PACKET command.
3560 *
1da177e4 3561 * LOCKING:
cca3974e 3562 * spin_lock_irqsave(host lock)
0cba632b 3563 *
1da177e4
LT
3564 * RETURNS: 0 when ATAPI DMA can be used
3565 * nonzero otherwise
3566 */
3567int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3568{
3569 struct ata_port *ap = qc->ap;
3570 int rc = 0; /* Assume ATAPI DMA is OK by default */
3571
3572 if (ap->ops->check_atapi_dma)
3573 rc = ap->ops->check_atapi_dma(qc);
3574
3575 return rc;
3576}
3577/**
3578 * ata_qc_prep - Prepare taskfile for submission
3579 * @qc: Metadata associated with taskfile to be prepared
3580 *
780a87f7
JG
3581 * Prepare ATA taskfile for submission.
3582 *
1da177e4 3583 * LOCKING:
cca3974e 3584 * spin_lock_irqsave(host lock)
1da177e4
LT
3585 */
3586void ata_qc_prep(struct ata_queued_cmd *qc)
3587{
3588 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3589 return;
3590
3591 ata_fill_sg(qc);
3592}
3593
e46834cd
BK
3594void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3595
0cba632b
JG
3596/**
3597 * ata_sg_init_one - Associate command with memory buffer
3598 * @qc: Command to be associated
3599 * @buf: Memory buffer
3600 * @buflen: Length of memory buffer, in bytes.
3601 *
3602 * Initialize the data-related elements of queued_cmd @qc
3603 * to point to a single memory buffer, @buf of byte length @buflen.
3604 *
3605 * LOCKING:
cca3974e 3606 * spin_lock_irqsave(host lock)
0cba632b
JG
3607 */
3608
1da177e4
LT
3609void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3610{
1da177e4
LT
3611 qc->flags |= ATA_QCFLAG_SINGLE;
3612
cedc9a47 3613 qc->__sg = &qc->sgent;
1da177e4 3614 qc->n_elem = 1;
cedc9a47 3615 qc->orig_n_elem = 1;
1da177e4 3616 qc->buf_virt = buf;
233277ca 3617 qc->nbytes = buflen;
1da177e4 3618
61c0596c 3619 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3620}
3621
0cba632b
JG
3622/**
3623 * ata_sg_init - Associate command with scatter-gather table.
3624 * @qc: Command to be associated
3625 * @sg: Scatter-gather table.
3626 * @n_elem: Number of elements in s/g table.
3627 *
3628 * Initialize the data-related elements of queued_cmd @qc
3629 * to point to a scatter-gather table @sg, containing @n_elem
3630 * elements.
3631 *
3632 * LOCKING:
cca3974e 3633 * spin_lock_irqsave(host lock)
0cba632b
JG
3634 */
3635
1da177e4
LT
3636void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3637 unsigned int n_elem)
3638{
3639 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3640 qc->__sg = sg;
1da177e4 3641 qc->n_elem = n_elem;
cedc9a47 3642 qc->orig_n_elem = n_elem;
1da177e4
LT
3643}
3644
3645/**
0cba632b
JG
3646 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3647 * @qc: Command with memory buffer to be mapped.
3648 *
3649 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3650 *
3651 * LOCKING:
cca3974e 3652 * spin_lock_irqsave(host lock)
1da177e4
LT
3653 *
3654 * RETURNS:
0cba632b 3655 * Zero on success, negative on error.
1da177e4
LT
3656 */
3657
3658static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3659{
3660 struct ata_port *ap = qc->ap;
3661 int dir = qc->dma_dir;
cedc9a47 3662 struct scatterlist *sg = qc->__sg;
1da177e4 3663 dma_addr_t dma_address;
2e242fa9 3664 int trim_sg = 0;
1da177e4 3665
cedc9a47
JG
3666 /* we must lengthen transfers to end on a 32-bit boundary */
3667 qc->pad_len = sg->length & 3;
3668 if (qc->pad_len) {
3669 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3670 struct scatterlist *psg = &qc->pad_sgent;
3671
a4631474 3672 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3673
3674 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3675
3676 if (qc->tf.flags & ATA_TFLAG_WRITE)
3677 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3678 qc->pad_len);
3679
3680 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3681 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3682 /* trim sg */
3683 sg->length -= qc->pad_len;
2e242fa9
TH
3684 if (sg->length == 0)
3685 trim_sg = 1;
cedc9a47
JG
3686
3687 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3688 sg->length, qc->pad_len);
3689 }
3690
2e242fa9
TH
3691 if (trim_sg) {
3692 qc->n_elem--;
e1410f2d
JG
3693 goto skip_map;
3694 }
3695
2f1f610b 3696 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3697 sg->length, dir);
537a95d9
TH
3698 if (dma_mapping_error(dma_address)) {
3699 /* restore sg */
3700 sg->length += qc->pad_len;
1da177e4 3701 return -1;
537a95d9 3702 }
1da177e4
LT
3703
3704 sg_dma_address(sg) = dma_address;
32529e01 3705 sg_dma_len(sg) = sg->length;
1da177e4 3706
2e242fa9 3707skip_map:
1da177e4
LT
3708 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3709 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3710
3711 return 0;
3712}
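/*
 * Illustrative sketch (not part of the original source): "length & 3"
 * above counts the 1-3 bytes hanging past the last 32-bit boundary.
 * Those bytes are diverted into the zero-filled 4-byte pad buffer, so
 * the device sees the transfer lengthened to the next multiple of four:
 */
static unsigned int padded_dma_len(unsigned int length)
{
	unsigned int pad_len = length & 3;

	if (!pad_len)
		return length;			/* already 32-bit aligned */
	return (length - pad_len) + 4;		/* trimmed sg + pad element */
}
/* e.g. padded_dma_len(510) == 512: sg trimmed to 508, 4-byte pad added. */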
3713
3714/**
0cba632b
JG
3715 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3716 * @qc: Command with scatter-gather table to be mapped.
3717 *
3718 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3719 *
3720 * LOCKING:
cca3974e 3721 * spin_lock_irqsave(host lock)
1da177e4
LT
3722 *
3723 * RETURNS:
0cba632b 3724 * Zero on success, negative on error.
1da177e4
LT
3725 *
3726 */
3727
3728static int ata_sg_setup(struct ata_queued_cmd *qc)
3729{
3730 struct ata_port *ap = qc->ap;
cedc9a47
JG
3731 struct scatterlist *sg = qc->__sg;
3732 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3733 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4
LT
3734
3735 VPRINTK("ENTER, ata%u\n", ap->id);
a4631474 3736 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3737
cedc9a47
JG
3738 /* we must lengthen transfers to end on a 32-bit boundary */
3739 qc->pad_len = lsg->length & 3;
3740 if (qc->pad_len) {
3741 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3742 struct scatterlist *psg = &qc->pad_sgent;
3743 unsigned int offset;
3744
a4631474 3745 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3746
3747 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3748
3749 /*
3750 * psg->page/offset are used to copy to-be-written
3751 * data in this function or read data in ata_sg_clean.
3752 */
3753 offset = lsg->offset + lsg->length - qc->pad_len;
3754 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3755 psg->offset = offset_in_page(offset);
3756
3757 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3758 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3759 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3760 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3761 }
3762
3763 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3764 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3765 /* trim last sg */
3766 lsg->length -= qc->pad_len;
e1410f2d
JG
3767 if (lsg->length == 0)
3768 trim_sg = 1;
cedc9a47
JG
3769
3770 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3771 qc->n_elem - 1, lsg->length, qc->pad_len);
3772 }
3773
e1410f2d
JG
3774 pre_n_elem = qc->n_elem;
3775 if (trim_sg && pre_n_elem)
3776 pre_n_elem--;
3777
3778 if (!pre_n_elem) {
3779 n_elem = 0;
3780 goto skip_map;
3781 }
3782
1da177e4 3783 dir = qc->dma_dir;
2f1f610b 3784 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3785 if (n_elem < 1) {
3786 /* restore last sg */
3787 lsg->length += qc->pad_len;
1da177e4 3788 return -1;
537a95d9 3789 }
1da177e4
LT
3790
3791 DPRINTK("%d sg elements mapped\n", n_elem);
3792
e1410f2d 3793skip_map:
1da177e4
LT
3794 qc->n_elem = n_elem;
3795
3796 return 0;
3797}
3798
0baab86b 3799/**
c893a3ae 3800 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3801 * @buf: Buffer to swap
3802 * @buf_words: Number of 16-bit words in buffer.
3803 *
3804 * Swap halves of 16-bit words if needed to convert from
3805 * little-endian byte order to native cpu byte order, or
3806 * vice-versa.
3807 *
3808 * LOCKING:
6f0ef4fa 3809 * Inherited from caller.
0baab86b 3810 */
1da177e4
LT
3811void swap_buf_le16(u16 *buf, unsigned int buf_words)
3812{
3813#ifdef __BIG_ENDIAN
3814 unsigned int i;
3815
3816 for (i = 0; i < buf_words; i++)
3817 buf[i] = le16_to_cpu(buf[i]);
3818#endif /* __BIG_ENDIAN */
3819}
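/*
 * Illustrative sketch (not part of the original source): on big-endian
 * CPUs the function above byte-swaps each 16-bit word in place; on
 * little-endian CPUs it compiles away. A self-contained rendition using
 * a hand-rolled swap in place of the kernel's le16_to_cpu():
 */
#include <stdint.h>

static void swap_words_le16(uint16_t *buf, unsigned int buf_words)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = (uint16_t)((buf[i] << 8) | (buf[i] >> 8));
#else
	(void)buf;
	(void)buf_words;	/* little-endian: data is already native order */
#endif
}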
3820
6ae4cfb5 3821/**
0d5ff566 3822 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3823 * @adev: device to target
6ae4cfb5
AL
3824 * @buf: data buffer
3825 * @buflen: buffer length
344babaa 3826 * @write_data: read/write
6ae4cfb5
AL
3827 *
3828 * Transfer data from/to the device data register by PIO.
3829 *
3830 * LOCKING:
3831 * Inherited from caller.
6ae4cfb5 3832 */
0d5ff566
TH
3833void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3834 unsigned int buflen, int write_data)
1da177e4 3835{
a6b2c5d4 3836 struct ata_port *ap = adev->ap;
6ae4cfb5 3837 unsigned int words = buflen >> 1;
1da177e4 3838
6ae4cfb5 3839 /* Transfer multiple of 2 bytes */
1da177e4 3840 if (write_data)
0d5ff566 3841 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3842 else
0d5ff566 3843 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3844
3845 /* Transfer trailing 1 byte, if any. */
3846 if (unlikely(buflen & 0x01)) {
3847 u16 align_buf[1] = { 0 };
3848 unsigned char *trailing_buf = buf + buflen - 1;
3849
3850 if (write_data) {
3851 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3852 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3853 } else {
0d5ff566 3854 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3855 memcpy(trailing_buf, align_buf, 1);
3856 }
3857 }
1da177e4
LT
3858}
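/*
 * Illustrative sketch (not part of the original source): the data
 * register is 16 bits wide, so an odd-length buffer needs one extra
 * word for its final byte, as handled above. Restated for the write
 * direction as a standalone helper:
 */
#include <stdint.h>
#include <string.h>

static uint16_t last_word_for_odd_write(const uint8_t *buf,
					unsigned int buflen)
{
	uint16_t align_buf = 0;

	/* copy the single trailing byte into a zero-padded 16-bit word */
	memcpy(&align_buf, buf + buflen - 1, 1);
	return align_buf;	/* what iowrite16() would push out */
}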
3859
75e99585 3860/**
0d5ff566 3861 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
3862 * @adev: device to target
3863 * @buf: data buffer
3864 * @buflen: buffer length
3865 * @write_data: read/write
3866 *
88574551 3867 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
3868 * transfer with interrupts disabled.
3869 *
3870 * LOCKING:
3871 * Inherited from caller.
3872 */
0d5ff566
TH
3873void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3874 unsigned int buflen, int write_data)
75e99585
AC
3875{
3876 unsigned long flags;
3877 local_irq_save(flags);
0d5ff566 3878 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
3879 local_irq_restore(flags);
3880}
3881
3882
6ae4cfb5
AL
3883/**
3884 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3885 * @qc: Command in progress
3886 *
3887 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3888 *
3889 * LOCKING:
3890 * Inherited from caller.
3891 */
3892
1da177e4
LT
3893static void ata_pio_sector(struct ata_queued_cmd *qc)
3894{
3895 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3896 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3897 struct ata_port *ap = qc->ap;
3898 struct page *page;
3899 unsigned int offset;
3900 unsigned char *buf;
3901
726f0785 3902 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
14be71f4 3903 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3904
3905 page = sg[qc->cursg].page;
726f0785 3906 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
3907
3908 /* get the current page and offset */
3909 page = nth_page(page, (offset >> PAGE_SHIFT));
3910 offset %= PAGE_SIZE;
3911
1da177e4
LT
3912 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3913
91b8b313
AL
3914 if (PageHighMem(page)) {
3915 unsigned long flags;
3916
a6b2c5d4 3917 /* FIXME: use a bounce buffer */
91b8b313
AL
3918 local_irq_save(flags);
3919 buf = kmap_atomic(page, KM_IRQ0);
083958d3 3920
91b8b313 3921 /* do the actual data transfer */
a6b2c5d4 3922 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 3923
91b8b313
AL
3924 kunmap_atomic(buf, KM_IRQ0);
3925 local_irq_restore(flags);
3926 } else {
3927 buf = page_address(page);
a6b2c5d4 3928 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 3929 }
1da177e4 3930
726f0785
TH
3931 qc->curbytes += ATA_SECT_SIZE;
3932 qc->cursg_ofs += ATA_SECT_SIZE;
1da177e4 3933
726f0785 3934 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
3935 qc->cursg++;
3936 qc->cursg_ofs = 0;
3937 }
1da177e4 3938}
1da177e4 3939
07f6f7d0
AL
3940/**
3941 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3942 * @qc: Command in progress
3943 *
c81e29b4 3944 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3945 * ATA device for the DRQ request.
3946 *
3947 * LOCKING:
3948 * Inherited from caller.
3949 */
1da177e4 3950
07f6f7d0
AL
3951static void ata_pio_sectors(struct ata_queued_cmd *qc)
3952{
3953 if (is_multi_taskfile(&qc->tf)) {
3954 /* READ/WRITE MULTIPLE */
3955 unsigned int nsect;
3956
587005de 3957 WARN_ON(qc->dev->multi_count == 0);
1da177e4 3958
726f0785
TH
3959 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
3960 qc->dev->multi_count);
07f6f7d0
AL
3961 while (nsect--)
3962 ata_pio_sector(qc);
3963 } else
3964 ata_pio_sector(qc);
3965}
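/*
 * Illustrative sketch (not part of the original source): for READ/WRITE
 * MULTIPLE, one DRQ block carries up to dev->multi_count sectors but
 * never more than what remains of the command. The nsect computation
 * above, restated standalone:
 */
static unsigned int drq_block_sectors(unsigned int nbytes,
				      unsigned int curbytes,
				      unsigned int multi_count,
				      unsigned int sect_size)
{
	unsigned int remaining = (nbytes - curbytes) / sect_size;

	return remaining < multi_count ? remaining : multi_count;
}
/* e.g. 24 KiB left, multi_count 16, 512-byte sectors -> 16 sectors now. */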
3966
c71c1857
AL
3967/**
3968 * atapi_send_cdb - Write CDB bytes to hardware
3969 * @ap: Port to which ATAPI device is attached.
3970 * @qc: Taskfile currently active
3971 *
3972 * When the device has indicated its readiness to accept
3973 * a CDB, this function is called. Send the CDB.
3974 *
3975 * LOCKING:
3976 * caller.
3977 */
3978
3979static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3980{
3981 /* send SCSI cdb */
3982 DPRINTK("send cdb\n");
db024d53 3983 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 3984
a6b2c5d4 3985 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
3986 ata_altstatus(ap); /* flush */
3987
3988 switch (qc->tf.protocol) {
3989 case ATA_PROT_ATAPI:
3990 ap->hsm_task_state = HSM_ST;
3991 break;
3992 case ATA_PROT_ATAPI_NODATA:
3993 ap->hsm_task_state = HSM_ST_LAST;
3994 break;
3995 case ATA_PROT_ATAPI_DMA:
3996 ap->hsm_task_state = HSM_ST_LAST;
3997 /* initiate bmdma */
3998 ap->ops->bmdma_start(qc);
3999 break;
4000 }
1da177e4
LT
4001}
4002
6ae4cfb5
AL
4003/**
4004 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4005 * @qc: Command in progress
4006 * @bytes: number of bytes
4007 *
4008 * Transfer data from/to the ATAPI device.
4009 *
4010 * LOCKING:
4011 * Inherited from caller.
4012 *
4013 */
4014
1da177e4
LT
4015static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4016{
4017 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4018 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4019 struct ata_port *ap = qc->ap;
4020 struct page *page;
4021 unsigned char *buf;
4022 unsigned int offset, count;
4023
563a6e1f 4024 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4025 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4026
4027next_sg:
563a6e1f 4028 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4029 /*
563a6e1f
AL
4030 * The end of qc->sg is reached and the device expects
4031 * more data to transfer. To avoid overrunning qc->sg while still
4032 * satisfying the length specified in the byte count register:
4033 * - for the read case, discard the trailing data from the device
4034 * - for the write case, pad the device with zero data
4035 */
4036 u16 pad_buf[1] = { 0 };
4037 unsigned int words = bytes >> 1;
4038 unsigned int i;
4039
4040 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4041 ata_dev_printk(qc->dev, KERN_WARNING,
4042 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4043
4044 for (i = 0; i < words; i++)
a6b2c5d4 4045 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4046
14be71f4 4047 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4048 return;
4049 }
4050
cedc9a47 4051 sg = &qc->__sg[qc->cursg];
1da177e4 4052
1da177e4
LT
4053 page = sg->page;
4054 offset = sg->offset + qc->cursg_ofs;
4055
4056 /* get the current page and offset */
4057 page = nth_page(page, (offset >> PAGE_SHIFT));
4058 offset %= PAGE_SIZE;
4059
6952df03 4060 /* don't overrun current sg */
32529e01 4061 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4062
4063 /* don't cross page boundaries */
4064 count = min(count, (unsigned int)PAGE_SIZE - offset);
4065
7282aa4b
AL
4066 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4067
91b8b313
AL
4068 if (PageHighMem(page)) {
4069 unsigned long flags;
4070
a6b2c5d4 4071 /* FIXME: use bounce buffer */
91b8b313
AL
4072 local_irq_save(flags);
4073 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4074
91b8b313 4075 /* do the actual data transfer */
a6b2c5d4 4076 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4077
91b8b313
AL
4078 kunmap_atomic(buf, KM_IRQ0);
4079 local_irq_restore(flags);
4080 } else {
4081 buf = page_address(page);
a6b2c5d4 4082 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4083 }
1da177e4
LT
4084
4085 bytes -= count;
4086 qc->curbytes += count;
4087 qc->cursg_ofs += count;
4088
32529e01 4089 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4090 qc->cursg++;
4091 qc->cursg_ofs = 0;
4092 }
4093
563a6e1f 4094 if (bytes)
1da177e4 4095 goto next_sg;
1da177e4
LT
4096}
4097
6ae4cfb5
AL
4098/**
4099 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4100 * @qc: Command in progress
4101 *
4102 * Transfer data from/to the ATAPI device.
4103 *
4104 * LOCKING:
4105 * Inherited from caller.
6ae4cfb5
AL
4106 */
4107
1da177e4
LT
4108static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4109{
4110 struct ata_port *ap = qc->ap;
4111 struct ata_device *dev = qc->dev;
4112 unsigned int ireason, bc_lo, bc_hi, bytes;
4113 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4114
eec4c3f3
AL
4115 /* Abuse qc->result_tf for temp storage of intermediate TF
4116 * here to save some kernel stack usage.
4117 * For normal completion, qc->result_tf is not relevant. For
4118 * error, qc->result_tf is later overwritten by ata_qc_complete().
4119 * So, the correctness of qc->result_tf is not affected.
4120 */
4121 ap->ops->tf_read(ap, &qc->result_tf);
4122 ireason = qc->result_tf.nsect;
4123 bc_lo = qc->result_tf.lbam;
4124 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4125 bytes = (bc_hi << 8) | bc_lo;
4126
4127 /* shall be cleared to zero, indicating xfer of data */
4128 if (ireason & (1 << 0))
4129 goto err_out;
4130
4131 /* make sure transfer direction matches expected */
4132 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4133 if (do_write != i_write)
4134 goto err_out;
4135
312f7da2
AL
4136 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4137
1da177e4
LT
4138 __atapi_pio_bytes(qc, bytes);
4139
4140 return;
4141
4142err_out:
f15a1daf 4143 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4144 qc->err_mask |= AC_ERR_HSM;
14be71f4 4145 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4146}
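/*
 * Illustrative sketch (not part of the original source): the ATAPI
 * protocol returns the DRQ transfer length in the LBA mid/high (byte
 * count) registers and the transfer direction in the "interrupt
 * reason" (sector count) register, as read above. Decoded standalone:
 */
#include <stdint.h>

static unsigned int atapi_byte_count(uint8_t bc_lo, uint8_t bc_hi)
{
	return (bc_hi << 8) | bc_lo;	/* bytes the device wants to move */
}

static int atapi_dir_is_write(uint8_t ireason)
{
	/* bit 0 must be clear for a data phase; bit 1 clear = host->device */
	return (ireason & (1 << 1)) == 0;
}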
4147
4148/**
c234fb00
AL
4149 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4150 * @ap: the target ata_port
4151 * @qc: qc in progress
1da177e4 4152 *
c234fb00
AL
4153 * RETURNS:
4154 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4155 */
c234fb00
AL
4156
4157static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4158{
c234fb00
AL
4159 if (qc->tf.flags & ATA_TFLAG_POLLING)
4160 return 1;
1da177e4 4161
c234fb00
AL
4162 if (ap->hsm_task_state == HSM_ST_FIRST) {
4163 if (qc->tf.protocol == ATA_PROT_PIO &&
4164 (qc->tf.flags & ATA_TFLAG_WRITE))
4165 return 1;
1da177e4 4166
c234fb00
AL
4167 if (is_atapi_taskfile(&qc->tf) &&
4168 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4169 return 1;
fe79e683
AL
4170 }
4171
c234fb00
AL
4172 return 0;
4173}
1da177e4 4174
c17ea20d
TH
4175/**
4176 * ata_hsm_qc_complete - finish a qc running on standard HSM
4177 * @qc: Command to complete
4178 * @in_wq: 1 if called from workqueue, 0 otherwise
4179 *
4180 * Finish @qc which is running on standard HSM.
4181 *
4182 * LOCKING:
cca3974e 4183 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4184 * Otherwise, none on entry and grabs host lock.
4185 */
4186static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4187{
4188 struct ata_port *ap = qc->ap;
4189 unsigned long flags;
4190
4191 if (ap->ops->error_handler) {
4192 if (in_wq) {
ba6a1308 4193 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4194
cca3974e
JG
4195 /* EH might have kicked in while host lock is
4196 * released.
c17ea20d
TH
4197 */
4198 qc = ata_qc_from_tag(ap, qc->tag);
4199 if (qc) {
4200 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4201 ap->ops->irq_on(ap);
c17ea20d
TH
4202 ata_qc_complete(qc);
4203 } else
4204 ata_port_freeze(ap);
4205 }
4206
ba6a1308 4207 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4208 } else {
4209 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4210 ata_qc_complete(qc);
4211 else
4212 ata_port_freeze(ap);
4213 }
4214 } else {
4215 if (in_wq) {
ba6a1308 4216 spin_lock_irqsave(ap->lock, flags);
83625006 4217 ap->ops->irq_on(ap);
c17ea20d 4218 ata_qc_complete(qc);
ba6a1308 4219 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4220 } else
4221 ata_qc_complete(qc);
4222 }
1da177e4 4223
c81e29b4 4224 ata_altstatus(ap); /* flush */
c17ea20d
TH
4225}
4226
bb5cb290
AL
4227/**
4228 * ata_hsm_move - move the HSM to the next state.
4229 * @ap: the target ata_port
4230 * @qc: qc in progress
4231 * @status: current device status
4232 * @in_wq: 1 if called from workqueue, 0 otherwise
4233 *
4234 * RETURNS:
4235 * 1 when poll next status needed, 0 otherwise.
4236 */
9a1004d0
TH
4237int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4238 u8 status, int in_wq)
e2cec771 4239{
bb5cb290
AL
4240 unsigned long flags = 0;
4241 int poll_next;
4242
6912ccd5
AL
4243 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4244
bb5cb290
AL
4245 /* Make sure ata_qc_issue_prot() does not throw things
4246 * like DMA polling into the workqueue. Notice that
4247 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4248 */
c234fb00 4249 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4250
e2cec771 4251fsm_start:
999bb6f4
AL
4252 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4253 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4254
e2cec771
AL
4255 switch (ap->hsm_task_state) {
4256 case HSM_ST_FIRST:
bb5cb290
AL
4257 /* Send first data block or PACKET CDB */
4258
4259 /* If polling, we will stay in the work queue after
4260 * sending the data. Otherwise, interrupt handler
4261 * takes over after sending the data.
4262 */
4263 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4264
e2cec771 4265 /* check device status */
3655d1d3
AL
4266 if (unlikely((status & ATA_DRQ) == 0)) {
4267 /* handle BSY=0, DRQ=0 as error */
4268 if (likely(status & (ATA_ERR | ATA_DF)))
4269 /* device stops HSM for abort/error */
4270 qc->err_mask |= AC_ERR_DEV;
4271 else
4272 /* HSM violation. Let EH handle this */
4273 qc->err_mask |= AC_ERR_HSM;
4274
14be71f4 4275 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4276 goto fsm_start;
1da177e4
LT
4277 }
4278
71601958
AL
4279 /* Device should not ask for data transfer (DRQ=1)
4280 * when it finds something wrong.
eee6c32f
AL
4281 * We ignore DRQ here and stop the HSM by
4282 * changing hsm_task_state to HSM_ST_ERR and
4283 * let the EH abort the command or reset the device.
71601958
AL
4284 */
4285 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4286 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4287 ap->id, status);
3655d1d3 4288 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4289 ap->hsm_task_state = HSM_ST_ERR;
4290 goto fsm_start;
71601958 4291 }
1da177e4 4292
bb5cb290
AL
4293 /* Send the CDB (atapi) or the first data block (ata pio out).
4294 * During the state transition, interrupt handler shouldn't
4295 * be invoked before the data transfer is complete and
4296 * hsm_task_state is changed. Hence, the following locking.
4297 */
4298 if (in_wq)
ba6a1308 4299 spin_lock_irqsave(ap->lock, flags);
1da177e4 4300
bb5cb290
AL
4301 if (qc->tf.protocol == ATA_PROT_PIO) {
4302 /* PIO data out protocol.
4303 * send first data block.
4304 */
0565c26d 4305
bb5cb290
AL
4306 /* ata_pio_sectors() might change the state
4307 * to HSM_ST_LAST. so, the state is changed here
4308 * before ata_pio_sectors().
4309 */
4310 ap->hsm_task_state = HSM_ST;
4311 ata_pio_sectors(qc);
4312 ata_altstatus(ap); /* flush */
4313 } else
4314 /* send CDB */
4315 atapi_send_cdb(ap, qc);
4316
4317 if (in_wq)
ba6a1308 4318 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4319
4320 /* if polling, ata_pio_task() handles the rest.
4321 * otherwise, interrupt handler takes over from here.
4322 */
e2cec771 4323 break;
1c848984 4324
e2cec771
AL
4325 case HSM_ST:
4326 /* complete command or read/write the data register */
4327 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4328 /* ATAPI PIO protocol */
4329 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4330 /* No more data to transfer or device error.
4331 * Device error will be tagged in HSM_ST_LAST.
4332 */
e2cec771
AL
4333 ap->hsm_task_state = HSM_ST_LAST;
4334 goto fsm_start;
4335 }
1da177e4 4336
71601958
AL
4337 /* Device should not ask for data transfer (DRQ=1)
4338 * when it finds something wrong.
eee6c32f
AL
4339 * We ignore DRQ here and stop the HSM by
4340 * changing hsm_task_state to HSM_ST_ERR and
4341 * let the EH abort the command or reset the device.
71601958
AL
4342 */
4343 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4344 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4345 ap->id, status);
3655d1d3 4346 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4347 ap->hsm_task_state = HSM_ST_ERR;
4348 goto fsm_start;
71601958 4349 }
1da177e4 4350
e2cec771 4351 atapi_pio_bytes(qc);
7fb6ec28 4352
e2cec771
AL
4353 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4354 /* bad ireason reported by device */
4355 goto fsm_start;
1da177e4 4356
e2cec771
AL
4357 } else {
4358 /* ATA PIO protocol */
4359 if (unlikely((status & ATA_DRQ) == 0)) {
4360 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4361 if (likely(status & (ATA_ERR | ATA_DF)))
4362 /* device stops HSM for abort/error */
4363 qc->err_mask |= AC_ERR_DEV;
4364 else
55a8e2c8
TH
4365 /* HSM violation. Let EH handle this.
4366 * Phantom devices also trigger this
4367 * condition. Mark hint.
4368 */
4369 qc->err_mask |= AC_ERR_HSM |
4370 AC_ERR_NODEV_HINT;
3655d1d3 4371
e2cec771
AL
4372 ap->hsm_task_state = HSM_ST_ERR;
4373 goto fsm_start;
4374 }
1da177e4 4375
eee6c32f
AL
4376 /* For PIO reads, some devices may ask for
4377 * data transfer (DRQ=1) along with ERR=1.
4378 * We respect DRQ here and transfer one
4379 * block of junk data before changing the
4380 * hsm_task_state to HSM_ST_ERR.
4381 *
4382 * For PIO writes, ERR=1 DRQ=1 doesn't make
4383 * sense since the data block has been
4384 * transferred to the device.
71601958
AL
4385 */
4386 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4387 /* data might be corrupted */
4388 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4389
4390 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4391 ata_pio_sectors(qc);
4392 ata_altstatus(ap);
4393 status = ata_wait_idle(ap);
4394 }
4395
3655d1d3
AL
4396 if (status & (ATA_BUSY | ATA_DRQ))
4397 qc->err_mask |= AC_ERR_HSM;
4398
eee6c32f
AL
4399 /* ata_pio_sectors() might change the
4400 * state to HSM_ST_LAST. so, the state
4401 * is changed after ata_pio_sectors().
4402 */
4403 ap->hsm_task_state = HSM_ST_ERR;
4404 goto fsm_start;
71601958
AL
4405 }
4406
e2cec771
AL
4407 ata_pio_sectors(qc);
4408
4409 if (ap->hsm_task_state == HSM_ST_LAST &&
4410 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4411 /* all data read */
4412 ata_altstatus(ap);
52a32205 4413 status = ata_wait_idle(ap);
e2cec771
AL
4414 goto fsm_start;
4415 }
4416 }
4417
4418 ata_altstatus(ap); /* flush */
bb5cb290 4419 poll_next = 1;
1da177e4
LT
4420 break;
4421
14be71f4 4422 case HSM_ST_LAST:
6912ccd5
AL
4423 if (unlikely(!ata_ok(status))) {
4424 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4425 ap->hsm_task_state = HSM_ST_ERR;
4426 goto fsm_start;
4427 }
4428
4429 /* no more data to transfer */
4332a771
AL
4430 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4431 ap->id, qc->dev->devno, status);
e2cec771 4432
6912ccd5
AL
4433 WARN_ON(qc->err_mask);
4434
e2cec771 4435 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4436
e2cec771 4437 /* complete taskfile transaction */
c17ea20d 4438 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4439
4440 poll_next = 0;
1da177e4
LT
4441 break;
4442
14be71f4 4443 case HSM_ST_ERR:
e2cec771
AL
4444 /* make sure qc->err_mask is available to
4445 * know what's wrong and recover
4446 */
4447 WARN_ON(qc->err_mask == 0);
4448
4449 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4450
999bb6f4 4451 /* complete taskfile transaction */
c17ea20d 4452 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4453
4454 poll_next = 0;
e2cec771
AL
4455 break;
4456 default:
bb5cb290 4457 poll_next = 0;
6912ccd5 4458 BUG();
1da177e4
LT
4459 }
4460
bb5cb290 4461 return poll_next;
1da177e4
LT
4462}
4463
65f27f38 4464static void ata_pio_task(struct work_struct *work)
8061f5f0 4465{
65f27f38
DH
4466 struct ata_port *ap =
4467 container_of(work, struct ata_port, port_task.work);
4468 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4469 u8 status;
a1af3734 4470 int poll_next;
8061f5f0 4471
7fb6ec28 4472fsm_start:
a1af3734 4473 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4474
a1af3734
AL
4475 /*
4476 * This is purely heuristic. This is a fast path.
4477 * Sometimes when we enter, BSY will be cleared in
4478 * a chk-status or two. If not, the drive is probably seeking
4479 * or something. Snooze for a couple msecs, then
4480 * chk-status again. If still busy, queue delayed work.
4481 */
4482 status = ata_busy_wait(ap, ATA_BUSY, 5);
4483 if (status & ATA_BUSY) {
4484 msleep(2);
4485 status = ata_busy_wait(ap, ATA_BUSY, 10);
4486 if (status & ATA_BUSY) {
31ce6dae 4487 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4488 return;
4489 }
8061f5f0
TH
4490 }
4491
a1af3734
AL
4492 /* move the HSM */
4493 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4494
a1af3734
AL
4495 /* another command or interrupt handler
4496 * may be running at this point.
4497 */
4498 if (poll_next)
7fb6ec28 4499 goto fsm_start;
8061f5f0
TH
4500}
4501
1da177e4
LT
4502/**
4503 * ata_qc_new - Request an available ATA command, for queueing
4504 * @ap: Port to allocate the command for
4506 *
4507 * LOCKING:
0cba632b 4508 * None.
1da177e4
LT
4509 */
4510
4511static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4512{
4513 struct ata_queued_cmd *qc = NULL;
4514 unsigned int i;
4515
e3180499 4516 /* no command while frozen */
b51e9e5d 4517 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4518 return NULL;
4519
2ab7db1f
TH
4520 /* the last tag is reserved for internal command. */
4521 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4522 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4523 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4524 break;
4525 }
4526
4527 if (qc)
4528 qc->tag = i;
4529
4530 return qc;
4531}
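/*
 * Illustrative sketch (not part of the original source): qc_allocated
 * is a bitmap of tags and test_and_set_bit() claims the first free one
 * atomically. The same first-fit policy in plain C -- a sketch of the
 * scan only, not of the atomicity:
 */
static int claim_first_free_tag(unsigned long *qc_allocated,
				unsigned int max_queue)
{
	unsigned int i;

	/* the last tag is reserved for the internal command, hence - 1 */
	for (i = 0; i < max_queue - 1; i++) {
		if (!(*qc_allocated & (1UL << i))) {
			*qc_allocated |= 1UL << i;
			return i;
		}
	}
	return -1;	/* all tags busy */
}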
4532
4533/**
4534 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4535 * @dev: Device from whom we request an available command structure
4536 *
4537 * LOCKING:
0cba632b 4538 * None.
1da177e4
LT
4539 */
4540
3373efd8 4541struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4542{
3373efd8 4543 struct ata_port *ap = dev->ap;
1da177e4
LT
4544 struct ata_queued_cmd *qc;
4545
4546 qc = ata_qc_new(ap);
4547 if (qc) {
1da177e4
LT
4548 qc->scsicmd = NULL;
4549 qc->ap = ap;
4550 qc->dev = dev;
1da177e4 4551
2c13b7ce 4552 ata_qc_reinit(qc);
1da177e4
LT
4553 }
4554
4555 return qc;
4556}
4557
1da177e4
LT
4558/**
4559 * ata_qc_free - free unused ata_queued_cmd
4560 * @qc: Command to complete
4561 *
4562 * Designed to free unused ata_queued_cmd object
4563 * in case something prevents using it.
4564 *
4565 * LOCKING:
cca3974e 4566 * spin_lock_irqsave(host lock)
1da177e4
LT
4567 */
4568void ata_qc_free(struct ata_queued_cmd *qc)
4569{
4ba946e9
TH
4570 struct ata_port *ap = qc->ap;
4571 unsigned int tag;
4572
a4631474 4573 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4574
4ba946e9
TH
4575 qc->flags = 0;
4576 tag = qc->tag;
4577 if (likely(ata_tag_valid(tag))) {
4ba946e9 4578 qc->tag = ATA_TAG_POISON;
6cec4a39 4579 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4580 }
1da177e4
LT
4581}
4582
76014427 4583void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4584{
dedaf2b0
TH
4585 struct ata_port *ap = qc->ap;
4586
a4631474
TH
4587 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4588 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4589
4590 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4591 ata_sg_clean(qc);
4592
7401abf2 4593 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4594 if (qc->tf.protocol == ATA_PROT_NCQ)
4595 ap->sactive &= ~(1 << qc->tag);
4596 else
4597 ap->active_tag = ATA_TAG_POISON;
7401abf2 4598
3f3791d3
AL
4599 /* atapi: mark qc as inactive to prevent the interrupt handler
4600 * from completing the command twice later, before the error handler
4601 * is called. (when rc != 0 and atapi request sense is needed)
4602 */
4603 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4604 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4605
1da177e4 4606 /* call completion callback */
77853bf2 4607 qc->complete_fn(qc);
1da177e4
LT
4608}
4609
39599a53
TH
4610static void fill_result_tf(struct ata_queued_cmd *qc)
4611{
4612 struct ata_port *ap = qc->ap;
4613
4614 ap->ops->tf_read(ap, &qc->result_tf);
4615 qc->result_tf.flags = qc->tf.flags;
4616}
4617
f686bcb8
TH
4618/**
4619 * ata_qc_complete - Complete an active ATA command
4620 * @qc: Command to complete
4622 *
4623 * Indicate to the mid and upper layers that an ATA
4624 * command has completed, with either an ok or not-ok status.
4625 *
4626 * LOCKING:
cca3974e 4627 * spin_lock_irqsave(host lock)
f686bcb8
TH
4628 */
4629void ata_qc_complete(struct ata_queued_cmd *qc)
4630{
4631 struct ata_port *ap = qc->ap;
4632
4633 /* XXX: New EH and old EH use different mechanisms to
4634 * synchronize EH with regular execution path.
4635 *
4636 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4637 * Normal execution path is responsible for not accessing a
4638 * failed qc. libata core enforces the rule by returning NULL
4639 * from ata_qc_from_tag() for failed qcs.
4640 *
4641 * Old EH depends on ata_qc_complete() nullifying completion
4642 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4643 * not synchronize with interrupt handler. Only PIO task is
4644 * taken care of.
4645 */
4646 if (ap->ops->error_handler) {
b51e9e5d 4647 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4648
4649 if (unlikely(qc->err_mask))
4650 qc->flags |= ATA_QCFLAG_FAILED;
4651
4652 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4653 if (!ata_tag_internal(qc->tag)) {
4654 /* always fill result TF for failed qc */
39599a53 4655 fill_result_tf(qc);
f686bcb8
TH
4656 ata_qc_schedule_eh(qc);
4657 return;
4658 }
4659 }
4660
4661 /* read result TF if requested */
4662 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4663 fill_result_tf(qc);
f686bcb8
TH
4664
4665 __ata_qc_complete(qc);
4666 } else {
4667 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4668 return;
4669
4670 /* read result TF if failed or requested */
4671 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4672 fill_result_tf(qc);
f686bcb8
TH
4673
4674 __ata_qc_complete(qc);
4675 }
4676}
4677
dedaf2b0
TH
4678/**
4679 * ata_qc_complete_multiple - Complete multiple qcs successfully
4680 * @ap: port in question
4681 * @qc_active: new qc_active mask
4682 * @finish_qc: LLDD callback invoked before completing a qc
4683 *
4684 * Complete in-flight commands. This function is meant to be
4685 * called from low-level driver's interrupt routine to complete
4686 * requests normally. ap->qc_active and @qc_active are compared
4687 * and commands are completed accordingly.
4688 *
4689 * LOCKING:
cca3974e 4690 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4691 *
4692 * RETURNS:
4693 * Number of completed commands on success, -errno otherwise.
4694 */
4695int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4696 void (*finish_qc)(struct ata_queued_cmd *))
4697{
4698 int nr_done = 0;
4699 u32 done_mask;
4700 int i;
4701
4702 done_mask = ap->qc_active ^ qc_active;
4703
4704 if (unlikely(done_mask & qc_active)) {
4705 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4706 "(%08x->%08x)\n", ap->qc_active, qc_active);
4707 return -EINVAL;
4708 }
4709
4710 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4711 struct ata_queued_cmd *qc;
4712
4713 if (!(done_mask & (1 << i)))
4714 continue;
4715
4716 if ((qc = ata_qc_from_tag(ap, i))) {
4717 if (finish_qc)
4718 finish_qc(qc);
4719 ata_qc_complete(qc);
4720 nr_done++;
4721 }
4722 }
4723
4724 return nr_done;
4725}
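/*
 * Illustrative sketch (not part of the original source): XOR of the old
 * and new active masks yields exactly the tags that changed state. A
 * bit set in both done_mask and the new mask would mean a tag went from
 * inactive to active, which the function above rejects as illegal.
 */
#include <stdint.h>

static uint32_t completed_tags(uint32_t old_active, uint32_t new_active)
{
	uint32_t done_mask = old_active ^ new_active;

	if (done_mask & new_active)
		return 0;	/* illegal transition; caller returns -EINVAL */
	return done_mask;	/* each set bit is a tag to complete */
}
/* e.g. completed_tags(0x0b, 0x01) == 0x0a: tags 1 and 3 just finished. */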
4726
1da177e4
LT
4727static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4728{
4729 struct ata_port *ap = qc->ap;
4730
4731 switch (qc->tf.protocol) {
3dc1d881 4732 case ATA_PROT_NCQ:
1da177e4
LT
4733 case ATA_PROT_DMA:
4734 case ATA_PROT_ATAPI_DMA:
4735 return 1;
4736
4737 case ATA_PROT_ATAPI:
4738 case ATA_PROT_PIO:
1da177e4
LT
4739 if (ap->flags & ATA_FLAG_PIO_DMA)
4740 return 1;
4741
4742 /* fall through */
4743
4744 default:
4745 return 0;
4746 }
4747
4748 /* never reached */
4749}
4750
4751/**
4752 * ata_qc_issue - issue taskfile to device
4753 * @qc: command to issue to device
4754 *
4755 * Prepare an ATA command for submission to the device.
4756 * This includes mapping the data into a DMA-able
4757 * area, filling in the S/G table, and finally
4758 * writing the taskfile to hardware, starting the command.
4759 *
4760 * LOCKING:
cca3974e 4761 * spin_lock_irqsave(host lock)
1da177e4 4762 */
8e0e694a 4763void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4764{
4765 struct ata_port *ap = qc->ap;
4766
dedaf2b0
TH
4767 /* Make sure only one non-NCQ command is outstanding. The
4768 * check is skipped for old EH because it reuses active qc to
4769 * request ATAPI sense.
4770 */
4771 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4772
4773 if (qc->tf.protocol == ATA_PROT_NCQ) {
4774 WARN_ON(ap->sactive & (1 << qc->tag));
4775 ap->sactive |= 1 << qc->tag;
4776 } else {
4777 WARN_ON(ap->sactive);
4778 ap->active_tag = qc->tag;
4779 }
4780
e4a70e76 4781 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4782 ap->qc_active |= 1 << qc->tag;
e4a70e76 4783
1da177e4
LT
4784 if (ata_should_dma_map(qc)) {
4785 if (qc->flags & ATA_QCFLAG_SG) {
4786 if (ata_sg_setup(qc))
8e436af9 4787 goto sg_err;
1da177e4
LT
4788 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4789 if (ata_sg_setup_one(qc))
8e436af9 4790 goto sg_err;
1da177e4
LT
4791 }
4792 } else {
4793 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4794 }
4795
4796 ap->ops->qc_prep(qc);
4797
8e0e694a
TH
4798 qc->err_mask |= ap->ops->qc_issue(qc);
4799 if (unlikely(qc->err_mask))
4800 goto err;
4801 return;
1da177e4 4802
8e436af9
TH
4803sg_err:
4804 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4805 qc->err_mask |= AC_ERR_SYSTEM;
4806err:
4807 ata_qc_complete(qc);
1da177e4
LT
4808}
4809
4810/**
4811 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4812 * @qc: command to issue to device
4813 *
4814 * Using various libata functions and hooks, this function
4815 * starts an ATA command. ATA commands are grouped into
4816 * classes called "protocols", and issuing each type of protocol
4817 * is slightly different.
4818 *
0baab86b
EF
4819 * May be used as the qc_issue() entry in ata_port_operations.
4820 *
1da177e4 4821 * LOCKING:
cca3974e 4822 * spin_lock_irqsave(host lock)
1da177e4
LT
4823 *
4824 * RETURNS:
9a3d9eb0 4825 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4826 */
4827
9a3d9eb0 4828unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4829{
4830 struct ata_port *ap = qc->ap;
4831
e50362ec
AL
4832 /* Use polling pio if the LLD doesn't handle
4833 * interrupt driven pio and atapi CDB interrupt.
4834 */
4835 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4836 switch (qc->tf.protocol) {
4837 case ATA_PROT_PIO:
e3472cbe 4838 case ATA_PROT_NODATA:
e50362ec
AL
4839 case ATA_PROT_ATAPI:
4840 case ATA_PROT_ATAPI_NODATA:
4841 qc->tf.flags |= ATA_TFLAG_POLLING;
4842 break;
4843 case ATA_PROT_ATAPI_DMA:
4844 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4845 /* see ata_dma_blacklisted() */
e50362ec
AL
4846 BUG();
4847 break;
4848 default:
4849 break;
4850 }
4851 }
4852
3d3cca37
TH
4853 /* Some controllers show flaky interrupt behavior after
4854 * setting xfer mode. Use polling instead.
4855 */
4856 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4857 qc->tf.feature == SETFEATURES_XFER) &&
4858 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4859 qc->tf.flags |= ATA_TFLAG_POLLING;
4860
312f7da2 4861 /* select the device */
1da177e4
LT
4862 ata_dev_select(ap, qc->dev->devno, 1, 0);
4863
312f7da2 4864 /* start the command */
1da177e4
LT
4865 switch (qc->tf.protocol) {
4866 case ATA_PROT_NODATA:
312f7da2
AL
4867 if (qc->tf.flags & ATA_TFLAG_POLLING)
4868 ata_qc_set_polling(qc);
4869
e5338254 4870 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4871 ap->hsm_task_state = HSM_ST_LAST;
4872
4873 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4874 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4875
1da177e4
LT
4876 break;
4877
4878 case ATA_PROT_DMA:
587005de 4879 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4880
1da177e4
LT
4881 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4882 ap->ops->bmdma_setup(qc); /* set up bmdma */
4883 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4884 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4885 break;
4886
312f7da2
AL
4887 case ATA_PROT_PIO:
4888 if (qc->tf.flags & ATA_TFLAG_POLLING)
4889 ata_qc_set_polling(qc);
1da177e4 4890
e5338254 4891 ata_tf_to_host(ap, &qc->tf);
312f7da2 4892
54f00389
AL
4893 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4894 /* PIO data out protocol */
4895 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4896 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4897
4898 /* always send first data block using
e27486db 4899 * the ata_pio_task() codepath.
54f00389 4900 */
312f7da2 4901 } else {
54f00389
AL
4902 /* PIO data in protocol */
4903 ap->hsm_task_state = HSM_ST;
4904
4905 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4906 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4907
4908 /* if polling, ata_pio_task() handles the rest.
4909 * otherwise, interrupt handler takes over from here.
4910 */
312f7da2
AL
4911 }
4912
1da177e4
LT
4913 break;
4914
1da177e4 4915 case ATA_PROT_ATAPI:
1da177e4 4916 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4917 if (qc->tf.flags & ATA_TFLAG_POLLING)
4918 ata_qc_set_polling(qc);
4919
e5338254 4920 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4921
312f7da2
AL
4922 ap->hsm_task_state = HSM_ST_FIRST;
4923
4924 /* send cdb by polling if no cdb interrupt */
4925 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4926 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4927 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4928 break;
4929
4930 case ATA_PROT_ATAPI_DMA:
587005de 4931 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4932
1da177e4
LT
4933 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4934 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4935 ap->hsm_task_state = HSM_ST_FIRST;
4936
4937 /* send cdb by polling if no cdb interrupt */
4938 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4939 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4940 break;
4941
4942 default:
4943 WARN_ON(1);
9a3d9eb0 4944 return AC_ERR_SYSTEM;
1da177e4
LT
4945 }
4946
4947 return 0;
4948}
4949
1da177e4
LT
4950/**
4951 * ata_host_intr - Handle host interrupt for given (port, task)
4952 * @ap: Port on which interrupt arrived (possibly...)
4953 * @qc: Taskfile currently active in engine
4954 *
4955 * Handle host interrupt for given queued command. Currently,
4956 * only DMA interrupts are handled. All other commands are
4957 * handled via polling with interrupts disabled (nIEN bit).
4958 *
4959 * LOCKING:
cca3974e 4960 * spin_lock_irqsave(host lock)
1da177e4
LT
4961 *
4962 * RETURNS:
4963 * One if interrupt was handled, zero if not (shared irq).
4964 */
4965
4966inline unsigned int ata_host_intr (struct ata_port *ap,
4967 struct ata_queued_cmd *qc)
4968{
ea54763f 4969 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 4970 u8 status, host_stat = 0;
1da177e4 4971
312f7da2
AL
4972 VPRINTK("ata%u: protocol %d task_state %d\n",
4973 ap->id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 4974
312f7da2
AL
4975 /* Check whether we are expecting interrupt in this state */
4976 switch (ap->hsm_task_state) {
4977 case HSM_ST_FIRST:
6912ccd5
AL
4978 /* Some pre-ATAPI-4 devices assert INTRQ
4979 * at this state when ready to receive CDB.
4980 */
1da177e4 4981
312f7da2
AL
4982 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4983 * The flag was turned on only for atapi devices.
4984 * No need to check is_atapi_taskfile(&qc->tf) again.
4985 */
4986 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 4987 goto idle_irq;
1da177e4 4988 break;
312f7da2
AL
4989 case HSM_ST_LAST:
4990 if (qc->tf.protocol == ATA_PROT_DMA ||
4991 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4992 /* check status of DMA engine */
4993 host_stat = ap->ops->bmdma_status(ap);
4994 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4995
4996 /* if it's not our irq... */
4997 if (!(host_stat & ATA_DMA_INTR))
4998 goto idle_irq;
4999
5000 /* before we do anything else, clear DMA-Start bit */
5001 ap->ops->bmdma_stop(qc);
a4f16610
AL
5002
5003 if (unlikely(host_stat & ATA_DMA_ERR)) {
5004 /* error when transferring data to/from memory */
5005 qc->err_mask |= AC_ERR_HOST_BUS;
5006 ap->hsm_task_state = HSM_ST_ERR;
5007 }
312f7da2
AL
5008 }
5009 break;
5010 case HSM_ST:
5011 break;
1da177e4
LT
5012 default:
5013 goto idle_irq;
5014 }
5015
312f7da2
AL
5016 /* check altstatus */
5017 status = ata_altstatus(ap);
5018 if (status & ATA_BUSY)
5019 goto idle_irq;
1da177e4 5020
312f7da2
AL
5021 /* check main status, clearing INTRQ */
5022 status = ata_chk_status(ap);
5023 if (unlikely(status & ATA_BUSY))
5024 goto idle_irq;
1da177e4 5025
312f7da2
AL
5026 /* ack bmdma irq events */
5027 ap->ops->irq_clear(ap);
1da177e4 5028
bb5cb290 5029 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5030
5031 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5032 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5033 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5034
1da177e4
LT
5035 return 1; /* irq handled */
5036
5037idle_irq:
5038 ap->stats.idle_irq++;
5039
5040#ifdef ATA_IRQ_TRAP
5041 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5042 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5043 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5044 return 1;
1da177e4
LT
5045 }
5046#endif
5047 return 0; /* irq not handled */
5048}
5049
5050/**
5051 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5052 * @irq: irq line (unused)
cca3974e 5053 * @dev_instance: pointer to our ata_host information structure
1da177e4 5054 *
0cba632b
JG
5055 * Default interrupt handler for PCI IDE devices. Calls
5056 * ata_host_intr() for each port that is not disabled.
5057 *
1da177e4 5058 * LOCKING:
cca3974e 5059 * Obtains host lock during operation.
1da177e4
LT
5060 *
5061 * RETURNS:
0cba632b 5062 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5063 */
5064
7d12e780 5065irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5066{
cca3974e 5067 struct ata_host *host = dev_instance;
1da177e4
LT
5068 unsigned int i;
5069 unsigned int handled = 0;
5070 unsigned long flags;
5071
5072 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5073 spin_lock_irqsave(&host->lock, flags);
1da177e4 5074
cca3974e 5075 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5076 struct ata_port *ap;
5077
cca3974e 5078 ap = host->ports[i];
c1389503 5079 if (ap &&
029f5468 5080 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5081 struct ata_queued_cmd *qc;
5082
5083 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5084 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5085 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5086 handled |= ata_host_intr(ap, qc);
5087 }
5088 }
5089
cca3974e 5090 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5091
5092 return IRQ_RETVAL(handled);
5093}
5094
34bf2170
TH
5095/**
5096 * sata_scr_valid - test whether SCRs are accessible
5097 * @ap: ATA port to test SCR accessibility for
5098 *
5099 * Test whether SCRs are accessible for @ap.
5100 *
5101 * LOCKING:
5102 * None.
5103 *
5104 * RETURNS:
5105 * 1 if SCRs are accessible, 0 otherwise.
5106 */
5107int sata_scr_valid(struct ata_port *ap)
5108{
5109 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5110}
5111
5112/**
5113 * sata_scr_read - read SCR register of the specified port
5114 * @ap: ATA port to read SCR for
5115 * @reg: SCR to read
5116 * @val: Place to store read value
5117 *
5118 * Read SCR register @reg of @ap into *@val. This function is
5119 * guaranteed to succeed if the cable type of the port is SATA
5120 * and the port implements ->scr_read.
5121 *
5122 * LOCKING:
5123 * None.
5124 *
5125 * RETURNS:
5126 * 0 on success, negative errno on failure.
5127 */
5128int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5129{
5130 if (sata_scr_valid(ap)) {
5131 *val = ap->ops->scr_read(ap, reg);
5132 return 0;
5133 }
5134 return -EOPNOTSUPP;
5135}
5136
5137/**
5138 * sata_scr_write - write SCR register of the specified port
5139 * @ap: ATA port to write SCR for
5140 * @reg: SCR to write
5141 * @val: value to write
5142 *
5143 * Write @val to SCR register @reg of @ap. This function is
5144 * guaranteed to succeed if the cable type of the port is SATA
5145 * and the port implements ->scr_read.
5146 *
5147 * LOCKING:
5148 * None.
5149 *
5150 * RETURNS:
5151 * 0 on success, negative errno on failure.
5152 */
5153int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5154{
5155 if (sata_scr_valid(ap)) {
5156 ap->ops->scr_write(ap, reg, val);
5157 return 0;
5158 }
5159 return -EOPNOTSUPP;
5160}
5161
5162/**
5163 * sata_scr_write_flush - write SCR register of the specified port and flush
5164 * @ap: ATA port to write SCR for
5165 * @reg: SCR to write
5166 * @val: value to write
5167 *
5168 * This function is identical to sata_scr_write() except that this
5169 * function performs flush after writing to the register.
5170 *
5171 * LOCKING:
5172 * None.
5173 *
5174 * RETURNS:
5175 * 0 on success, negative errno on failure.
5176 */
5177int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5178{
5179 if (sata_scr_valid(ap)) {
5180 ap->ops->scr_write(ap, reg, val);
5181 ap->ops->scr_read(ap, reg);
5182 return 0;
5183 }
5184 return -EOPNOTSUPP;
5185}
5186
5187/**
5188 * ata_port_online - test whether the given port is online
5189 * @ap: ATA port to test
5190 *
5191 * Test whether @ap is online. Note that this function returns 0
5192 * if online status of @ap cannot be obtained, so
5193 * ata_port_online(ap) != !ata_port_offline(ap).
5194 *
5195 * LOCKING:
5196 * None.
5197 *
5198 * RETURNS:
5199 * 1 if the port online status is available and online.
5200 */
5201int ata_port_online(struct ata_port *ap)
5202{
5203 u32 sstatus;
5204
5205 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5206 return 1;
5207 return 0;
5208}
5209
5210/**
5211 * ata_port_offline - test whether the given port is offline
5212 * @ap: ATA port to test
5213 *
5214 * Test whether @ap is offline. Note that this function returns
5215 * 0 if offline status of @ap cannot be obtained, so
5216 * ata_port_online(ap) != !ata_port_offline(ap).
5217 *
5218 * LOCKING:
5219 * None.
5220 *
5221 * RETURNS:
5222 * 1 if the port offline status is available and offline.
5223 */
5224int ata_port_offline(struct ata_port *ap)
5225{
5226 u32 sstatus;
5227
5228 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5229 return 1;
5230 return 0;
5231}
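/*
 * Illustrative sketch (not part of the original source): both tests
 * above key off the DET field, bits 3:0 of SStatus, where the value 0x3
 * means "device present and PHY communication established". Decoding a
 * raw register value:
 */
#include <stdint.h>

static int sstatus_det_online(uint32_t sstatus)
{
	return (sstatus & 0xf) == 0x3;	/* DET == 3: link is up */
}
/* When SStatus cannot be read at all, both ata_port_online() and
 * ata_port_offline() return 0 -- hence online != !offline. */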
0baab86b 5232
77b08fb5 5233int ata_flush_cache(struct ata_device *dev)
9b847548 5234{
977e6b9f 5235 unsigned int err_mask;
9b847548
JA
5236 u8 cmd;
5237
5238 if (!ata_try_flush_cache(dev))
5239 return 0;
5240
6fc49adb 5241 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5242 cmd = ATA_CMD_FLUSH_EXT;
5243 else
5244 cmd = ATA_CMD_FLUSH;
5245
977e6b9f
TH
5246 err_mask = ata_do_simple_cmd(dev, cmd);
5247 if (err_mask) {
5248 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5249 return -EIO;
5250 }
5251
5252 return 0;
9b847548
JA
5253}
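/*
 * Illustrative sketch (not part of the original source): the command
 * selection above picks FLUSH CACHE EXT for devices flagged
 * ATA_DFLAG_FLUSH_EXT (48-bit capable), and plain FLUSH CACHE
 * otherwise. With the standard ATA opcodes spelled out:
 */
#include <stdint.h>

#define CMD_FLUSH	0xE7	/* ATA FLUSH CACHE */
#define CMD_FLUSH_EXT	0xEA	/* ATA FLUSH CACHE EXT (48-bit devices) */

static uint8_t pick_flush_cmd(int has_flush_ext)
{
	return has_flush_ext ? CMD_FLUSH_EXT : CMD_FLUSH;
}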
5254
cca3974e
JG
5255static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5256 unsigned int action, unsigned int ehi_flags,
5257 int wait)
500530f6
TH
5258{
5259 unsigned long flags;
5260 int i, rc;
5261
cca3974e
JG
5262 for (i = 0; i < host->n_ports; i++) {
5263 struct ata_port *ap = host->ports[i];
500530f6
TH
5264
5265 /* Previous resume operation might still be in
5266 * progress. Wait for PM_PENDING to clear.
5267 */
5268 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5269 ata_port_wait_eh(ap);
5270 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5271 }
5272
5273 /* request PM ops to EH */
5274 spin_lock_irqsave(ap->lock, flags);
5275
5276 ap->pm_mesg = mesg;
5277 if (wait) {
5278 rc = 0;
5279 ap->pm_result = &rc;
5280 }
5281
5282 ap->pflags |= ATA_PFLAG_PM_PENDING;
5283 ap->eh_info.action |= action;
5284 ap->eh_info.flags |= ehi_flags;
5285
5286 ata_port_schedule_eh(ap);
5287
5288 spin_unlock_irqrestore(ap->lock, flags);
5289
5290 /* wait and check result */
5291 if (wait) {
5292 ata_port_wait_eh(ap);
5293 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5294 if (rc)
5295 return rc;
5296 }
5297 }
5298
5299 return 0;
5300}
5301
5302/**
cca3974e
JG
5303 * ata_host_suspend - suspend host
5304 * @host: host to suspend
500530f6
TH
5305 * @mesg: PM message
5306 *
cca3974e 5307 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5308 * function requests EH to perform PM operations and waits for EH
5309 * to finish.
5310 *
5311 * LOCKING:
5312 * Kernel thread context (may sleep).
5313 *
5314 * RETURNS:
5315 * 0 on success, -errno on failure.
5316 */
cca3974e 5317int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5318{
5319 int i, j, rc;
5320
cca3974e 5321 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5322 if (rc)
5323 goto fail;
5324
5325 /* EH is quiescent now. Fail if we have any ready device.
5326 * This happens if hotplug occurs between completion of device
5327 * suspension and here.
5328 */
cca3974e
JG
5329 for (i = 0; i < host->n_ports; i++) {
5330 struct ata_port *ap = host->ports[i];
500530f6
TH
5331
5332 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5333 struct ata_device *dev = &ap->device[j];
5334
5335 if (ata_dev_ready(dev)) {
5336 ata_port_printk(ap, KERN_WARNING,
5337 "suspend failed, device %d "
5338 "still active\n", dev->devno);
5339 rc = -EBUSY;
5340 goto fail;
5341 }
5342 }
5343 }
5344
cca3974e 5345 host->dev->power.power_state = mesg;
500530f6
TH
5346 return 0;
5347
5348 fail:
cca3974e 5349 ata_host_resume(host);
500530f6
TH
5350 return rc;
5351}
5352
5353/**
cca3974e
JG
5354 * ata_host_resume - resume host
5355 * @host: host to resume
500530f6 5356 *
cca3974e 5357 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5358 * function requests EH to perform PM operations and returns.
5359 * Note that all resume operations are performed in parallel.
5360 *
5361 * LOCKING:
5362 * Kernel thread context (may sleep).
5363 */
cca3974e 5364void ata_host_resume(struct ata_host *host)
500530f6 5365{
cca3974e
JG
5366 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5367 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5368 host->dev->power.power_state = PMSG_ON;
500530f6
TH
5369}
5370
c893a3ae
RD
5371/**
5372 * ata_port_start - Set port up for dma.
5373 * @ap: Port to initialize
5374 *
5375 * Called just after data structures for each port are
5376 * initialized. Allocates space for PRD table.
5377 *
5378 * May be used as the port_start() entry in ata_port_operations.
5379 *
5380 * LOCKING:
5381 * Inherited from caller.
5382 */
f0d36efd 5383int ata_port_start(struct ata_port *ap)
1da177e4 5384{
2f1f610b 5385 struct device *dev = ap->dev;
6037d6bb 5386 int rc;
1da177e4 5387
f0d36efd
TH
5388 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5389 GFP_KERNEL);
1da177e4
LT
5390 if (!ap->prd)
5391 return -ENOMEM;
5392
6037d6bb 5393 rc = ata_pad_alloc(ap, dev);
f0d36efd 5394 if (rc)
6037d6bb 5395 return rc;
1da177e4 5396
f0d36efd
TH
5397 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5398 (unsigned long long)ap->prd_dma);
1da177e4
LT
5399 return 0;
5400}
5401
3ef3b43d
TH
5402/**
5403 * ata_dev_init - Initialize an ata_device structure
5404 * @dev: Device structure to initialize
5405 *
5406 * Initialize @dev in preparation for probing.
5407 *
5408 * LOCKING:
5409 * Inherited from caller.
5410 */
5411void ata_dev_init(struct ata_device *dev)
5412{
5413 struct ata_port *ap = dev->ap;
72fa4b74
TH
5414 unsigned long flags;
5415
5a04bf4b
TH
5416 /* SATA spd limit is bound to the first device */
5417 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5418
72fa4b74
TH
5419 /* High bits of dev->flags are used to record warm plug
5420 * requests which occur asynchronously. Synchronize using
cca3974e 5421 * host lock.
72fa4b74 5422 */
ba6a1308 5423 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5424 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5425 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5426
72fa4b74
TH
5427 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5428 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5429 dev->pio_mask = UINT_MAX;
5430 dev->mwdma_mask = UINT_MAX;
5431 dev->udma_mask = UINT_MAX;
5432}
5433
1da177e4 5434/**
155a8a9c 5435 * ata_port_init - Initialize an ata_port structure
1da177e4 5436 * @ap: Structure to initialize
cca3974e 5437 * @host: Collection of hosts to which @ap belongs
1da177e4
LT
5438 * @ent: Probe information provided by low-level driver
5439 * @port_no: Port number associated with this ata_port
5440 *
155a8a9c 5441 * Initialize a new ata_port structure.
0cba632b 5442 *
1da177e4 5443 * LOCKING:
0cba632b 5444 * Inherited from caller.
1da177e4 5445 */
cca3974e 5446void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5447 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5448{
5449 unsigned int i;
5450
cca3974e 5451 ap->lock = &host->lock;
198e0fed 5452 ap->flags = ATA_FLAG_DISABLED;
155a8a9c 5453 ap->id = ata_unique_id++;
1da177e4 5454 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5455 ap->host = host;
2f1f610b 5456 ap->dev = ent->dev;
1da177e4 5457 ap->port_no = port_no;
fea63e38
TH
5458 if (port_no == 1 && ent->pinfo2) {
5459 ap->pio_mask = ent->pinfo2->pio_mask;
5460 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5461 ap->udma_mask = ent->pinfo2->udma_mask;
5462 ap->flags |= ent->pinfo2->flags;
5463 ap->ops = ent->pinfo2->port_ops;
5464 } else {
5465 ap->pio_mask = ent->pio_mask;
5466 ap->mwdma_mask = ent->mwdma_mask;
5467 ap->udma_mask = ent->udma_mask;
5468 ap->flags |= ent->port_flags;
5469 ap->ops = ent->port_ops;
5470 }
5a04bf4b 5471 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5472 ap->active_tag = ATA_TAG_POISON;
5473 ap->last_ctl = 0xFF;
bd5d825c
BP
5474
5475#if defined(ATA_VERBOSE_DEBUG)
5476 /* turn on all debugging levels */
5477 ap->msg_enable = 0x00FF;
5478#elif defined(ATA_DEBUG)
5479 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5480#else
0dd4b21f 5481 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5482#endif
1da177e4 5483
65f27f38
DH
5484 INIT_DELAYED_WORK(&ap->port_task, NULL);
5485 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5486 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5487 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5488 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5489
838df628
TH
5490 /* set cable type */
5491 ap->cbl = ATA_CBL_NONE;
5492 if (ap->flags & ATA_FLAG_SATA)
5493 ap->cbl = ATA_CBL_SATA;
5494
acf356b1
TH
5495 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5496 struct ata_device *dev = &ap->device[i];
38d87234 5497 dev->ap = ap;
72fa4b74 5498 dev->devno = i;
3ef3b43d 5499 ata_dev_init(dev);
acf356b1 5500 }
1da177e4
LT
5501
5502#ifdef ATA_IRQ_TRAP
5503 ap->stats.unhandled_irq = 1;
5504 ap->stats.idle_irq = 1;
5505#endif
5506
5507 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5508}
5509
155a8a9c 5510/**
4608c160
TH
5511 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5512 * @ap: ATA port to initialize SCSI host for
5513 * @shost: SCSI host associated with @ap
155a8a9c 5514 *
4608c160 5515 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5516 *
5517 * LOCKING:
5518 * Inherited from caller.
5519 */
4608c160 5520static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5521{
cca3974e 5522 ap->scsi_host = shost;
155a8a9c 5523
4608c160
TH
5524 shost->unique_id = ap->id;
5525 shost->max_id = 16;
5526 shost->max_lun = 1;
5527 shost->max_channel = 1;
5528 shost->max_cmd_len = 12;
155a8a9c
BK
5529}
5530
1da177e4 5531/**
996139f1 5532 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5533 * @ent: Information provided by low-level driver
cca3974e 5534 * @host: Collections of ports to which we add
1da177e4
LT
5535 * @port_no: Port number associated with this host
5536 *
0cba632b
JG
5537 * Attach low-level ATA driver to system.
5538 *
1da177e4 5539 * LOCKING:
0cba632b 5540 * PCI/etc. bus probe sem.
1da177e4
LT
5541 *
5542 * RETURNS:
0cba632b 5543 * New ata_port on success, NULL on error.
1da177e4 5544 */
996139f1 5545static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5546 struct ata_host *host,
1da177e4
LT
5547 unsigned int port_no)
5548{
996139f1 5549 struct Scsi_Host *shost;
1da177e4 5550 struct ata_port *ap;
1da177e4
LT
5551
5552 DPRINTK("ENTER\n");
aec5c3c1 5553
52783c5d 5554 if (!ent->port_ops->error_handler &&
cca3974e 5555 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5556 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5557 port_no);
5558 return NULL;
5559 }
5560
996139f1
JG
5561 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5562 if (!shost)
1da177e4
LT
5563 return NULL;
5564
996139f1 5565 shost->transportt = &ata_scsi_transport_template;
30afc84c 5566
996139f1 5567 ap = ata_shost_to_port(shost);
1da177e4 5568
cca3974e 5569 ata_port_init(ap, host, ent, port_no);
996139f1 5570 ata_port_init_shost(ap, shost);
1da177e4 5571
1da177e4 5572 return ap;
1da177e4
LT
5573}
5574
f0d36efd
TH
5575static void ata_host_release(struct device *gendev, void *res)
5576{
5577 struct ata_host *host = dev_get_drvdata(gendev);
5578 int i;
5579
5580 for (i = 0; i < host->n_ports; i++) {
5581 struct ata_port *ap = host->ports[i];
5582
5583 if (!ap)
5584 continue;
5585
5586 if (ap->ops->port_stop)
5587 ap->ops->port_stop(ap);
5588
5589 scsi_host_put(ap->scsi_host);
5590 }
5591
5592 if (host->ops->host_stop)
5593 host->ops->host_stop(host);
5594}
5595
b03732f0 5596/**
cca3974e
JG
 5597 * ata_host_init - Initialize a host struct
5598 * @host: host to initialize
5599 * @dev: device host is attached to
5600 * @flags: host flags
5601 * @ops: port_ops
b03732f0
BK
5602 *
5603 * LOCKING:
5604 * PCI/etc. bus probe sem.
5605 *
5606 */
5607
cca3974e
JG
5608void ata_host_init(struct ata_host *host, struct device *dev,
5609 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5610{
cca3974e
JG
5611 spin_lock_init(&host->lock);
5612 host->dev = dev;
5613 host->flags = flags;
5614 host->ops = ops;
b03732f0
BK
5615}
5616
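/*
 * Usage sketch (assumed SAS-style attachment): initialize the common
 * host fields before wiring up ports by hand. The zero flags value is
 * an assumption; real LLDs pass their own host flags.
 */
static void example_host_setup(struct ata_host *host, struct device *dev,
			       const struct ata_port_operations *ops)
{
	ata_host_init(host, dev, 0, ops);
	host->n_ports = 1;
}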
1da177e4 5617/**
0cba632b
JG
5618 * ata_device_add - Register hardware device with ATA and SCSI layers
5619 * @ent: Probe information describing hardware device to be registered
5620 *
5621 * This function processes the information provided in the probe
5622 * information struct @ent, allocates the necessary ATA and SCSI
5623 * host information structures, initializes them, and registers
5624 * everything with requisite kernel subsystems.
5625 *
5626 * This function requests irqs, probes the ATA bus, and probes
5627 * the SCSI bus.
1da177e4
LT
5628 *
5629 * LOCKING:
0cba632b 5630 * PCI/etc. bus probe sem.
1da177e4
LT
5631 *
5632 * RETURNS:
0cba632b 5633 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5634 */
057ace5e 5635int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5636{
6d0500df 5637 unsigned int i;
1da177e4 5638 struct device *dev = ent->dev;
cca3974e 5639 struct ata_host *host;
39b07ce6 5640 int rc;
1da177e4
LT
5641
5642 DPRINTK("ENTER\n");
f20b16ff 5643
02f076aa
AC
5644 if (ent->irq == 0) {
5645 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5646 return 0;
5647 }
f0d36efd
TH
5648
5649 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5650 return 0;
5651
1da177e4 5652 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5653 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5654 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5655 if (!host)
f0d36efd
TH
5656 goto err_out;
5657 devres_add(dev, host);
5658 dev_set_drvdata(dev, host);
1da177e4 5659
cca3974e
JG
5660 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5661 host->n_ports = ent->n_ports;
5662 host->irq = ent->irq;
5663 host->irq2 = ent->irq2;
0d5ff566 5664 host->iomap = ent->iomap;
cca3974e 5665 host->private_data = ent->private_data;
1da177e4
LT
5666
5667 /* register each port bound to this device */
cca3974e 5668 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5669 struct ata_port *ap;
5670 unsigned long xfer_mode_mask;
2ec7df04 5671 int irq_line = ent->irq;
1da177e4 5672
cca3974e 5673 ap = ata_port_add(ent, host, i);
c38778c3 5674 host->ports[i] = ap;
1da177e4
LT
5675 if (!ap)
5676 goto err_out;
5677
dd5b06c4
TH
5678 /* dummy? */
5679 if (ent->dummy_port_mask & (1 << i)) {
5680 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5681 ap->ops = &ata_dummy_port_ops;
5682 continue;
5683 }
5684
5685 /* start port */
5686 rc = ap->ops->port_start(ap);
5687 if (rc) {
cca3974e
JG
5688 host->ports[i] = NULL;
5689 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5690 goto err_out;
5691 }
5692
2ec7df04
AC
5693 /* Report the secondary IRQ for second channel legacy */
5694 if (i == 1 && ent->irq2)
5695 irq_line = ent->irq2;
5696
1da177e4
LT
5697 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5698 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5699 (ap->pio_mask << ATA_SHIFT_PIO);
5700
5701 /* print per-port info to dmesg */
0d5ff566
TH
5702 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5703 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5704 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5705 ata_mode_string(xfer_mode_mask),
5706 ap->ioaddr.cmd_addr,
5707 ap->ioaddr.ctl_addr,
5708 ap->ioaddr.bmdma_addr,
2ec7df04 5709 irq_line);
1da177e4 5710
0f0a3ad3
TH
5711 /* freeze port before requesting IRQ */
5712 ata_eh_freeze_port(ap);
1da177e4
LT
5713 }
5714
2ec7df04 5715 /* obtain irq, which may be shared between channels */
f0d36efd
TH
5716 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5717 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5718 if (rc) {
 5719 dev_printk(KERN_ERR, dev, "irq %u request failed: %d\n",
5720 ent->irq, rc);
1da177e4 5721 goto err_out;
39b07ce6 5722 }
1da177e4 5723
2ec7df04
AC
 5724 /* do we have a second IRQ for the other channel, e.g. legacy mode */
5725 if (ent->irq2) {
 5726 /* We will get weird core code crashes later if this is true,
 5727 so trap it now */
5728 BUG_ON(ent->irq == ent->irq2);
5729
f0d36efd
TH
5730 rc = devm_request_irq(dev, ent->irq2,
5731 ent->port_ops->irq_handler, ent->irq_flags,
5732 DRV_NAME, host);
2ec7df04
AC
5733 if (rc) {
 5734 dev_printk(KERN_ERR, dev, "irq %u request failed: %d\n",
5735 ent->irq2, rc);
f0d36efd 5736 goto err_out;
2ec7df04
AC
5737 }
5738 }
5739
f0d36efd 5740 /* resource acquisition complete */
b878ca5d 5741 devres_remove_group(dev, ata_device_add);
f0d36efd 5742
1da177e4
LT
5743 /* perform each probe synchronously */
5744 DPRINTK("probe begin\n");
cca3974e
JG
5745 for (i = 0; i < host->n_ports; i++) {
5746 struct ata_port *ap = host->ports[i];
5a04bf4b 5747 u32 scontrol;
1da177e4
LT
5748 int rc;
5749
5a04bf4b
TH
5750 /* init sata_spd_limit to the current value */
5751 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5752 int spd = (scontrol >> 4) & 0xf;
5753 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5754 }
5755 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5756
cca3974e 5757 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5758 if (rc) {
f15a1daf 5759 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5760 /* FIXME: do something useful here */
5761 /* FIXME: handle unconditional calls to
5762 * scsi_scan_host and ata_host_remove, below,
5763 * at the very least
5764 */
5765 }
3e706399 5766
52783c5d 5767 if (ap->ops->error_handler) {
1cdaf534 5768 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5769 unsigned long flags;
5770
5771 ata_port_probe(ap);
5772
5773 /* kick EH for boot probing */
ba6a1308 5774 spin_lock_irqsave(ap->lock, flags);
3e706399 5775
1cdaf534
TH
5776 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5777 ehi->action |= ATA_EH_SOFTRESET;
5778 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5779
b51e9e5d 5780 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5781 ata_port_schedule_eh(ap);
5782
ba6a1308 5783 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5784
5785 /* wait for EH to finish */
5786 ata_port_wait_eh(ap);
5787 } else {
5788 DPRINTK("ata%u: bus probe begin\n", ap->id);
5789 rc = ata_bus_probe(ap);
5790 DPRINTK("ata%u: bus probe end\n", ap->id);
5791
5792 if (rc) {
5793 /* FIXME: do something useful here?
5794 * Current libata behavior will
5795 * tear down everything when
5796 * the module is removed
5797 * or the h/w is unplugged.
5798 */
5799 }
5800 }
1da177e4
LT
5801 }
5802
5803 /* probes are done, now scan each port's disk(s) */
c893a3ae 5804 DPRINTK("host probe begin\n");
cca3974e
JG
5805 for (i = 0; i < host->n_ports; i++) {
5806 struct ata_port *ap = host->ports[i];
1da177e4 5807
644dd0cc 5808 ata_scsi_scan_host(ap);
1da177e4
LT
5809 }
5810
1da177e4
LT
5811 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5812 return ent->n_ports; /* success */
5813
f0d36efd
TH
5814 err_out:
5815 devres_release_group(dev, ata_device_add);
5816 dev_set_drvdata(dev, NULL);
5817 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
5818 return 0;
5819}
5820
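/*
 * Probe-path sketch (hypothetical LLD): ata_device_add() returns the
 * number of ports registered, so zero is treated as failure.
 */
static int example_activate(struct ata_probe_ent *ent)
{
	if (!ata_device_add(ent))
		return -ENODEV;	/* no ports registered */
	return 0;
}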
720ba126
TH
5821/**
 5822 * ata_port_detach - Detach ATA port in preparation for device removal
5823 * @ap: ATA port to be detached
5824 *
5825 * Detach all ATA devices and the associated SCSI devices of @ap;
5826 * then, remove the associated SCSI host. @ap is guaranteed to
5827 * be quiescent on return from this function.
5828 *
5829 * LOCKING:
5830 * Kernel thread context (may sleep).
5831 */
5832void ata_port_detach(struct ata_port *ap)
5833{
5834 unsigned long flags;
5835 int i;
5836
5837 if (!ap->ops->error_handler)
c3cf30a9 5838 goto skip_eh;
720ba126
TH
5839
5840 /* tell EH we're leaving & flush EH */
ba6a1308 5841 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5842 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5843 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5844
5845 ata_port_wait_eh(ap);
5846
5847 /* EH is now guaranteed to see UNLOADING, so no new device
5848 * will be attached. Disable all existing devices.
5849 */
ba6a1308 5850 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
5851
5852 for (i = 0; i < ATA_MAX_DEVICES; i++)
5853 ata_dev_disable(&ap->device[i]);
5854
ba6a1308 5855 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5856
5857 /* Final freeze & EH. All in-flight commands are aborted. EH
 5858 * will be skipped and retries will be terminated with bad
5859 * target.
5860 */
ba6a1308 5861 spin_lock_irqsave(ap->lock, flags);
720ba126 5862 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5863 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5864
5865 ata_port_wait_eh(ap);
5866
5867 /* Flush hotplug task. The sequence is similar to
5868 * ata_port_flush_task().
5869 */
5870 flush_workqueue(ata_aux_wq);
5871 cancel_delayed_work(&ap->hotplug_task);
5872 flush_workqueue(ata_aux_wq);
5873
c3cf30a9 5874 skip_eh:
720ba126 5875 /* remove the associated SCSI host */
cca3974e 5876 scsi_remove_host(ap->scsi_host);
720ba126
TH
5877}
5878
0529c159
TH
5879/**
5880 * ata_host_detach - Detach all ports of an ATA host
5881 * @host: Host to detach
5882 *
5883 * Detach all ports of @host.
5884 *
5885 * LOCKING:
5886 * Kernel thread context (may sleep).
5887 */
5888void ata_host_detach(struct ata_host *host)
5889{
5890 int i;
5891
5892 for (i = 0; i < host->n_ports; i++)
5893 ata_port_detach(host->ports[i]);
5894}
5895
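/*
 * Removal sketch (hypothetical non-PCI LLD): a bus remove hook only
 * needs to detach the ports; with managed (devres) LLDs the remaining
 * resources are released automatically.
 */
static void example_remove(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}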
f6d950e2
BK
5896struct ata_probe_ent *
5897ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5898{
5899 struct ata_probe_ent *probe_ent;
5900
f0d36efd
TH
5901 /* XXX - the following if can go away once all LLDs are managed */
5902 if (!list_empty(&dev->devres_head))
5903 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5904 else
5905 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
5906 if (!probe_ent) {
5907 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5908 kobject_name(&(dev->kobj)));
5909 return NULL;
5910 }
5911
5912 INIT_LIST_HEAD(&probe_ent->node);
5913 probe_ent->dev = dev;
5914
5915 probe_ent->sht = port->sht;
cca3974e 5916 probe_ent->port_flags = port->flags;
f6d950e2
BK
5917 probe_ent->pio_mask = port->pio_mask;
5918 probe_ent->mwdma_mask = port->mwdma_mask;
5919 probe_ent->udma_mask = port->udma_mask;
5920 probe_ent->port_ops = port->port_ops;
d639ca94 5921 probe_ent->private_data = port->private_data;
f6d950e2
BK
5922
5923 return probe_ent;
5924}
5925
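/*
 * Sketch (hypothetical single-port LLD): build a probe entry from a
 * static ata_port_info, then fill in the bus-specific fields that
 * ata_probe_ent_alloc does not copy.
 */
static struct ata_probe_ent *
example_make_ent(struct device *dev, const struct ata_port_info *pinfo,
		 unsigned int irq)
{
	struct ata_probe_ent *ent = ata_probe_ent_alloc(dev, pinfo);

	if (!ent)
		return NULL;
	ent->n_ports = 1;
	ent->irq = irq;
	return ent;
}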
1da177e4
LT
5926/**
5927 * ata_std_ports - initialize ioaddr with standard port offsets.
5928 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5929 *
5930 * Utility function which initializes data_addr, error_addr,
5931 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5932 * device_addr, status_addr, and command_addr to standard offsets
5933 * relative to cmd_addr.
5934 *
5935 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5936 */
0baab86b 5937
1da177e4
LT
5938void ata_std_ports(struct ata_ioports *ioaddr)
5939{
5940 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5941 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5942 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5943 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5944 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5945 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5946 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5947 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5948 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5949 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5950}
5951
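/*
 * Usage sketch (hypothetical LLD): point cmd_addr and ctl_addr at the
 * iomapped register blocks, then let ata_std_ports derive the rest of
 * the taskfile register addresses.
 */
static void example_setup_ioaddr(struct ata_ioports *ioaddr,
				 void __iomem *cmd, void __iomem *ctl)
{
	ioaddr->cmd_addr = cmd;
	ioaddr->altstatus_addr = ctl;
	ioaddr->ctl_addr = ctl;
	ata_std_ports(ioaddr);
}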
0baab86b 5952
374b1873
JG
5953#ifdef CONFIG_PCI
5954
1da177e4
LT
5955/**
5956 * ata_pci_remove_one - PCI layer callback for device removal
5957 * @pdev: PCI device that was removed
5958 *
b878ca5d
TH
5959 * PCI layer indicates to libata via this hook that hot-unplug or
5960 * module unload event has occurred. Detach all ports. Resource
5961 * release is handled via devres.
1da177e4
LT
5962 *
5963 * LOCKING:
5964 * Inherited from PCI layer (may sleep).
5965 */
f0d36efd 5966void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
5967{
5968 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 5969 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 5970
b878ca5d 5971 ata_host_detach(host);
1da177e4
LT
5972}
5973
5974/* move to PCI subsystem */
057ace5e 5975int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5976{
5977 unsigned long tmp = 0;
5978
5979 switch (bits->width) {
5980 case 1: {
5981 u8 tmp8 = 0;
5982 pci_read_config_byte(pdev, bits->reg, &tmp8);
5983 tmp = tmp8;
5984 break;
5985 }
5986 case 2: {
5987 u16 tmp16 = 0;
5988 pci_read_config_word(pdev, bits->reg, &tmp16);
5989 tmp = tmp16;
5990 break;
5991 }
5992 case 4: {
5993 u32 tmp32 = 0;
5994 pci_read_config_dword(pdev, bits->reg, &tmp32);
5995 tmp = tmp32;
5996 break;
5997 }
5998
5999 default:
6000 return -EINVAL;
6001 }
6002
6003 tmp &= bits->mask;
6004
6005 return (tmp == bits->val) ? 1 : 0;
6006}
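/*
 * Usage sketch: probing a port-enable bit the way PATA LLDs do. The
 * config offset, mask and value below are made-up, not from any real
 * chipset.
 */
static const struct pci_bits example_enable_bits = {
	0x41,	/* reg (hypothetical config-space offset) */
	1,	/* width: one byte */
	0x80,	/* mask */
	0x80,	/* val: bit set => port enabled */
};

static int example_port_enabled(struct pci_dev *pdev)
{
	return pci_test_config_bits(pdev, &example_enable_bits);
}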
9b847548 6007
3c5100c1 6008void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6009{
6010 pci_save_state(pdev);
500530f6 6011
3c5100c1 6012 if (mesg.event == PM_EVENT_SUSPEND) {
500530f6
TH
6013 pci_disable_device(pdev);
6014 pci_set_power_state(pdev, PCI_D3hot);
6015 }
9b847548
JA
6016}
6017
553c4aa6 6018int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6019{
553c4aa6
TH
6020 int rc;
6021
9b847548
JA
6022 pci_set_power_state(pdev, PCI_D0);
6023 pci_restore_state(pdev);
553c4aa6 6024
b878ca5d 6025 rc = pcim_enable_device(pdev);
553c4aa6
TH
6026 if (rc) {
6027 dev_printk(KERN_ERR, &pdev->dev,
6028 "failed to enable device after resume (%d)\n", rc);
6029 return rc;
6030 }
6031
9b847548 6032 pci_set_master(pdev);
553c4aa6 6033 return 0;
500530f6
TH
6034}
6035
3c5100c1 6036int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6037{
cca3974e 6038 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6039 int rc = 0;
6040
cca3974e 6041 rc = ata_host_suspend(host, mesg);
500530f6
TH
6042 if (rc)
6043 return rc;
6044
3c5100c1 6045 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6046
6047 return 0;
6048}
6049
6050int ata_pci_device_resume(struct pci_dev *pdev)
6051{
cca3974e 6052 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6053 int rc;
500530f6 6054
553c4aa6
TH
6055 rc = ata_pci_device_do_resume(pdev);
6056 if (rc == 0)
6057 ata_host_resume(host);
6058 return rc;
9b847548 6059}
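/*
 * Wiring sketch (hypothetical PCI LLD): simple controllers can use the
 * generic helpers directly. example_pci_ids and example_init_one are
 * assumed to be defined elsewhere in the driver.
 */
static struct pci_driver example_pci_driver = {
	.name		= "example_ata",
	.id_table	= example_pci_ids,
	.probe		= example_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};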
1da177e4
LT
6060#endif /* CONFIG_PCI */
6061
6062
1da177e4
LT
6063static int __init ata_init(void)
6064{
a8601e5f 6065 ata_probe_timeout *= HZ;
1da177e4
LT
6066 ata_wq = create_workqueue("ata");
6067 if (!ata_wq)
6068 return -ENOMEM;
6069
453b07ac
TH
6070 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6071 if (!ata_aux_wq) {
6072 destroy_workqueue(ata_wq);
6073 return -ENOMEM;
6074 }
6075
1da177e4
LT
6076 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6077 return 0;
6078}
6079
6080static void __exit ata_exit(void)
6081{
6082 destroy_workqueue(ata_wq);
453b07ac 6083 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6084}
6085
a4625085 6086subsys_initcall(ata_init);
1da177e4
LT
6087module_exit(ata_exit);
6088
67846b30 6089static unsigned long ratelimit_time;
34af946a 6090static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6091
6092int ata_ratelimit(void)
6093{
6094 int rc;
6095 unsigned long flags;
6096
6097 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6098
6099 if (time_after(jiffies, ratelimit_time)) {
6100 rc = 1;
6101 ratelimit_time = jiffies + (HZ/5);
6102 } else
6103 rc = 0;
6104
6105 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6106
6107 return rc;
6108}
6109
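/*
 * Usage sketch: throttle a noisy interrupt-path message to roughly
 * five prints per second.
 */
static void example_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}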
c22daff4
TH
6110/**
6111 * ata_wait_register - wait until register value changes
6112 * @reg: IO-mapped register
6113 * @mask: Mask to apply to read register value
6114 * @val: Wait condition
6115 * @interval_msec: polling interval in milliseconds
6116 * @timeout_msec: timeout in milliseconds
6117 *
 6118 * Waiting for some bits of a register to change is a common
 6119 * operation for ATA controllers. This function reads a 32-bit LE
6120 * IO-mapped register @reg and tests for the following condition.
6121 *
6122 * (*@reg & mask) != val
6123 *
6124 * If the condition is met, it returns; otherwise, the process is
6125 * repeated after @interval_msec until timeout.
6126 *
6127 * LOCKING:
6128 * Kernel thread context (may sleep)
6129 *
6130 * RETURNS:
6131 * The final register value.
6132 */
6133u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6134 unsigned long interval_msec,
6135 unsigned long timeout_msec)
6136{
6137 unsigned long timeout;
6138 u32 tmp;
6139
6140 tmp = ioread32(reg);
6141
6142 /* Calculate timeout _after_ the first read to make sure
6143 * preceding writes reach the controller before starting to
6144 * eat away the timeout.
6145 */
6146 timeout = jiffies + (timeout_msec * HZ) / 1000;
6147
6148 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6149 msleep(interval_msec);
6150 tmp = ioread32(reg);
6151 }
6152
6153 return tmp;
6154}
6155
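/*
 * Usage sketch: poll an assumed 32-bit status register until BSY
 * clears, checking every 10ms and giving up after one second.
 */
static int example_wait_not_busy(void __iomem *status_reg)
{
	u32 status = ata_wait_register(status_reg, ATA_BUSY, ATA_BUSY,
				       10, 1000);
	return (status & ATA_BUSY) ? -EBUSY : 0;
}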
dd5b06c4
TH
6156/*
6157 * Dummy port_ops
6158 */
6159static void ata_dummy_noret(struct ata_port *ap) { }
6160static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6161static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6162
6163static u8 ata_dummy_check_status(struct ata_port *ap)
6164{
6165 return ATA_DRDY;
6166}
6167
6168static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6169{
6170 return AC_ERR_SYSTEM;
6171}
6172
6173const struct ata_port_operations ata_dummy_port_ops = {
6174 .port_disable = ata_port_disable,
6175 .check_status = ata_dummy_check_status,
6176 .check_altstatus = ata_dummy_check_status,
6177 .dev_select = ata_noop_dev_select,
6178 .qc_prep = ata_noop_qc_prep,
6179 .qc_issue = ata_dummy_qc_issue,
6180 .freeze = ata_dummy_noret,
6181 .thaw = ata_dummy_noret,
6182 .error_handler = ata_dummy_noret,
6183 .post_internal_cmd = ata_dummy_qc_noret,
6184 .irq_clear = ata_dummy_noret,
6185 .port_start = ata_dummy_ret0,
6186 .port_stop = ata_dummy_noret,
6187};
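/*
 * Usage sketch: an LLD with an unusable second channel can request a
 * dummy port, which ata_device_add() (above) binds to
 * ata_dummy_port_ops so every issued command fails cleanly.
 */
static void example_mark_dummy(struct ata_probe_ent *ent)
{
	ent->dummy_port_mask |= 1 << 1;	/* port 1 not wired up */
}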
6188
1da177e4
LT
6189/*
6190 * libata is essentially a library of internal helper functions for
6191 * low-level ATA host controller drivers. As such, the API/ABI is
6192 * likely to change as new drivers are added and updated.
6193 * Do not depend on ABI/API stability.
6194 */
6195
e9c83914
TH
6196EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6197EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6198EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6199EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6200EXPORT_SYMBOL_GPL(ata_std_bios_param);
6201EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6202EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6203EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6204EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6205EXPORT_SYMBOL_GPL(ata_sg_init);
6206EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6207EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6208EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6209EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6210EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6211EXPORT_SYMBOL_GPL(ata_tf_load);
6212EXPORT_SYMBOL_GPL(ata_tf_read);
6213EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6214EXPORT_SYMBOL_GPL(ata_std_dev_select);
6215EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6216EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6217EXPORT_SYMBOL_GPL(ata_check_status);
6218EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6219EXPORT_SYMBOL_GPL(ata_exec_command);
6220EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6221EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6222EXPORT_SYMBOL_GPL(ata_data_xfer);
6223EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6224EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6225EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6226EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6227EXPORT_SYMBOL_GPL(ata_bmdma_start);
6228EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6229EXPORT_SYMBOL_GPL(ata_bmdma_status);
6230EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6231EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6232EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6233EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6234EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6235EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6236EXPORT_SYMBOL_GPL(ata_port_probe);
3c567b7d 6237EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6238EXPORT_SYMBOL_GPL(sata_phy_debounce);
6239EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6240EXPORT_SYMBOL_GPL(sata_phy_reset);
6241EXPORT_SYMBOL_GPL(__sata_phy_reset);
6242EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6243EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6244EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6245EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6246EXPORT_SYMBOL_GPL(sata_std_hardreset);
6247EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6248EXPORT_SYMBOL_GPL(ata_dev_classify);
6249EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6250EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6251EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6252EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6253EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6254EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6255EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6256EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6257EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6258EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6259EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6260EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6261EXPORT_SYMBOL_GPL(sata_scr_valid);
6262EXPORT_SYMBOL_GPL(sata_scr_read);
6263EXPORT_SYMBOL_GPL(sata_scr_write);
6264EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6265EXPORT_SYMBOL_GPL(ata_port_online);
6266EXPORT_SYMBOL_GPL(ata_port_offline);
cca3974e
JG
6267EXPORT_SYMBOL_GPL(ata_host_suspend);
6268EXPORT_SYMBOL_GPL(ata_host_resume);
6a62a04d
TH
6269EXPORT_SYMBOL_GPL(ata_id_string);
6270EXPORT_SYMBOL_GPL(ata_id_c_string);
6919a0a6 6271EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6272EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6273
1bc4ccff 6274EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6275EXPORT_SYMBOL_GPL(ata_timing_compute);
6276EXPORT_SYMBOL_GPL(ata_timing_merge);
6277
1da177e4
LT
6278#ifdef CONFIG_PCI
6279EXPORT_SYMBOL_GPL(pci_test_config_bits);
6280EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6281EXPORT_SYMBOL_GPL(ata_pci_init_one);
6282EXPORT_SYMBOL_GPL(ata_pci_remove_one);
500530f6
TH
6283EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6284EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6285EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6286EXPORT_SYMBOL_GPL(ata_pci_device_resume);
67951ade
AC
6287EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6288EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6289#endif /* CONFIG_PCI */
9b847548 6290
9b847548
JA
6291EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6292EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
ece1d636 6293
ece1d636 6294EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6295EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6296EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6297EXPORT_SYMBOL_GPL(ata_port_freeze);
6298EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6299EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6300EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6301EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6302EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6303EXPORT_SYMBOL_GPL(ata_irq_on);
6304EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6305EXPORT_SYMBOL_GPL(ata_irq_ack);
6306EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);