[PATCH] libata: check if port is disabled after internal command
[deliverable/linux.git] / drivers / scsi / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/init.h>
40#include <linux/list.h>
41#include <linux/mm.h>
42#include <linux/highmem.h>
43#include <linux/spinlock.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h>
46#include <linux/timer.h>
47#include <linux/interrupt.h>
48#include <linux/completion.h>
49#include <linux/suspend.h>
50#include <linux/workqueue.h>
67846b30 51#include <linux/jiffies.h>
378f058c 52#include <linux/scatterlist.h>
1da177e4 53#include <scsi/scsi.h>
1da177e4 54#include "scsi_priv.h"
193515d5 55#include <scsi/scsi_cmnd.h>
1da177e4
LT
56#include <scsi/scsi_host.h>
57#include <linux/libata.h>
58#include <asm/io.h>
59#include <asm/semaphore.h>
60#include <asm/byteorder.h>
61
62#include "libata.h"
63
6aff8f1f
TH
64static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
1da177e4
LT
66static void ata_set_mode(struct ata_port *ap);
67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
acf356b1 68static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
1da177e4
LT
69
70static unsigned int ata_unique_id = 1;
71static struct workqueue_struct *ata_wq;
72
418dc1f5 73int atapi_enabled = 1;
1623c81e
JG
74module_param(atapi_enabled, int, 0444);
75MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
76
c3c013a2
JG
77int libata_fua = 0;
78module_param_named(fua, libata_fua, int, 0444);
79MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
80
1da177e4
LT
81MODULE_AUTHOR("Jeff Garzik");
82MODULE_DESCRIPTION("Library module for ATA devices");
83MODULE_LICENSE("GPL");
84MODULE_VERSION(DRV_VERSION);
85
0baab86b 86
1da177e4
LT
87/**
88 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
89 * @tf: Taskfile to convert
90 * @fis: Buffer into which data will output
91 * @pmp: Port multiplier port
92 *
93 * Converts a standard ATA taskfile to a Serial ATA
94 * FIS structure (Register - Host to Device).
95 *
96 * LOCKING:
97 * Inherited from caller.
98 */
99
057ace5e 100void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
101{
102 fis[0] = 0x27; /* Register - Host to Device FIS */
103 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
104 bit 7 indicates Command FIS */
105 fis[2] = tf->command;
106 fis[3] = tf->feature;
107
108 fis[4] = tf->lbal;
109 fis[5] = tf->lbam;
110 fis[6] = tf->lbah;
111 fis[7] = tf->device;
112
113 fis[8] = tf->hob_lbal;
114 fis[9] = tf->hob_lbam;
115 fis[10] = tf->hob_lbah;
116 fis[11] = tf->hob_feature;
117
118 fis[12] = tf->nsect;
119 fis[13] = tf->hob_nsect;
120 fis[14] = 0;
121 fis[15] = tf->ctl;
122
123 fis[16] = 0;
124 fis[17] = 0;
125 fis[18] = 0;
126 fis[19] = 0;
127}
128
129/**
130 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
131 * @fis: Buffer from which data will be input
132 * @tf: Taskfile to output
133 *
e12a1be6 134 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
135 *
136 * LOCKING:
137 * Inherited from caller.
138 */
139
057ace5e 140void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
141{
142 tf->command = fis[2]; /* status */
143 tf->feature = fis[3]; /* error */
144
145 tf->lbal = fis[4];
146 tf->lbam = fis[5];
147 tf->lbah = fis[6];
148 tf->device = fis[7];
149
150 tf->hob_lbal = fis[8];
151 tf->hob_lbam = fis[9];
152 tf->hob_lbah = fis[10];
153
154 tf->nsect = fis[12];
155 tf->hob_nsect = fis[13];
156}
157
8cbd6df1
AL
/* Read/write command lookup table for ata_rwcmd_protocol().
 * Indexed by: base (0 = PIO multi, 8 = PIO, 16 = DMA)
 *             + 4 * FUA + 2 * LBA48 + (write ? 1 : 0).
 * A zero entry means that combination is not supported.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,	/* FUA requires LBA48 + write */
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,				/* no FUA variant for plain PIO */
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
187
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	0 on success; -1 when the needed FUA/LBA48/write combination
 *	has no command (zero entry in ata_rw_cmds[]).
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	/* Build the ata_rw_cmds[] index: 4*FUA + 2*LBA48 + write,
	 * on top of a base chosen below (0/8/16).
	 */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		/* base 0 = PIO multi-sector, base 8 = plain PIO */
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	/* a zero table entry marks an unsupported combination */
	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
229
cb95d562
TH
230/**
231 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
232 * @pio_mask: pio_mask
233 * @mwdma_mask: mwdma_mask
234 * @udma_mask: udma_mask
235 *
236 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
237 * unsigned int xfer_mask.
238 *
239 * LOCKING:
240 * None.
241 *
242 * RETURNS:
243 * Packed xfer_mask.
244 */
245static unsigned int ata_pack_xfermask(unsigned int pio_mask,
246 unsigned int mwdma_mask,
247 unsigned int udma_mask)
248{
249 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
250 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
251 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
252}
253
c0489e4e
TH
254/**
255 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
256 * @xfer_mask: xfer_mask to unpack
257 * @pio_mask: resulting pio_mask
258 * @mwdma_mask: resulting mwdma_mask
259 * @udma_mask: resulting udma_mask
260 *
261 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
262 * Any NULL distination masks will be ignored.
263 */
264static void ata_unpack_xfermask(unsigned int xfer_mask,
265 unsigned int *pio_mask,
266 unsigned int *mwdma_mask,
267 unsigned int *udma_mask)
268{
269 if (pio_mask)
270 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
271 if (mwdma_mask)
272 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
273 if (udma_mask)
274 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
275}
276
cb95d562
TH
277static const struct ata_xfer_ent {
278 unsigned int shift, bits;
279 u8 base;
280} ata_xfer_tbl[] = {
281 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
282 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
283 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
284 { -1, },
285};
286
287/**
288 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
289 * @xfer_mask: xfer_mask of interest
290 *
291 * Return matching XFER_* value for @xfer_mask. Only the highest
292 * bit of @xfer_mask is considered.
293 *
294 * LOCKING:
295 * None.
296 *
297 * RETURNS:
298 * Matching XFER_* value, 0 if no match found.
299 */
300static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
301{
302 int highbit = fls(xfer_mask) - 1;
303 const struct ata_xfer_ent *ent;
304
305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
306 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
307 return ent->base + highbit - ent->shift;
308 return 0;
309}
310
311/**
312 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
313 * @xfer_mode: XFER_* of interest
314 *
315 * Return matching xfer_mask for @xfer_mode.
316 *
317 * LOCKING:
318 * None.
319 *
320 * RETURNS:
321 * Matching xfer_mask, 0 if no match found.
322 */
323static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
324{
325 const struct ata_xfer_ent *ent;
326
327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return 1 << (ent->shift + xfer_mode - ent->base);
330 return 0;
331}
332
333/**
334 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
335 * @xfer_mode: XFER_* of interest
336 *
337 * Return matching xfer_shift for @xfer_mode.
338 *
339 * LOCKING:
340 * None.
341 *
342 * RETURNS:
343 * Matching xfer_shift, -1 if no match found.
344 */
345static int ata_xfer_mode2shift(unsigned int xfer_mode)
346{
347 const struct ata_xfer_ent *ent;
348
349 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
350 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
351 return ent->shift;
352 return -1;
353}
354
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	/* empty mask (highbit < 0) or out-of-table bit -> "<n/a>" */
	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
396
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device echoed the pattern back, 0 otherwise.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating 0x55/0xaa patterns; repeated writes make
	 * sure the final values stick before reading back
	 */
	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
440
/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	MMIO counterpart of ata_pio_devchk().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device echoed the pattern back, 0 otherwise.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* same alternating-pattern probe as the PIO variant, via MMIO */
	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
484
485/**
486 * ata_devchk - PATA device presence detection
487 * @ap: ATA channel to examine
488 * @device: Device to examine (starting at zero)
489 *
490 * Dispatch ATA device presence detection, depending
491 * on whether we are using PIO or MMIO to talk to the
492 * ATA shadow registers.
493 *
494 * LOCKING:
495 * caller.
496 */
497
498static unsigned int ata_devchk(struct ata_port *ap,
499 unsigned int device)
500{
501 if (ap->flags & ATA_FLAG_MMIO)
502 return ata_mmio_devchk(ap, device);
503 return ata_pio_devchk(ap, device);
504}
505
506/**
507 * ata_dev_classify - determine device type based on ATA-spec signature
508 * @tf: ATA taskfile register set for device to be identified
509 *
510 * Determine from taskfile register contents whether a device is
511 * ATA or ATAPI, as per "Signature and persistence" section
512 * of ATA/PI spec (volume 1, sect 5.14).
513 *
514 * LOCKING:
515 * None.
516 *
517 * RETURNS:
518 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
519 * the event of failure.
520 */
521
057ace5e 522unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
523{
524 /* Apple's open source Darwin code hints that some devices only
525 * put a proper signature into the LBA mid/high registers,
526 * So, we only check those. It's sufficient for uniqueness.
527 */
528
529 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
530 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
531 DPRINTK("found ATA device by sig\n");
532 return ATA_DEV_ATA;
533 }
534
535 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
536 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
537 DPRINTK("found ATAPI device by sig\n");
538 return ATA_DEV_ATAPI;
539 }
540
541 DPRINTK("unknown device\n");
542 return ATA_DEV_UNKNOWN;
543}
544
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion (may be NULL)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	/* after a read the feature field holds the error register */
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* err 0x81 on device 0 means device 1 failed diags,
		 * device 0 itself is fine */
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* ATA signature plus all-zero status: treat as absent */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
600
601/**
6a62a04d 602 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
603 * @id: IDENTIFY DEVICE results we will examine
604 * @s: string into which data is output
605 * @ofs: offset into identify device page
606 * @len: length of string to return. must be an even number.
607 *
608 * The strings in the IDENTIFY DEVICE page are broken up into
609 * 16-bit chunks. Run through the string, and output each
610 * 8-bit chunk linearly, regardless of platform.
611 *
612 * LOCKING:
613 * caller.
614 */
615
6a62a04d
TH
616void ata_id_string(const u16 *id, unsigned char *s,
617 unsigned int ofs, unsigned int len)
1da177e4
LT
618{
619 unsigned int c;
620
621 while (len > 0) {
622 c = id[ofs] >> 8;
623 *s = c;
624 s++;
625
626 c = id[ofs] & 0xff;
627 *s = c;
628 s++;
629
630 ofs++;
631 len -= 2;
632 }
633}
634
0e949ff3 635/**
6a62a04d 636 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
637 * @id: IDENTIFY DEVICE results we will examine
638 * @s: string into which data is output
639 * @ofs: offset into identify device page
640 * @len: length of string to return. must be an odd number.
641 *
6a62a04d 642 * This function is identical to ata_id_string except that it
0e949ff3
TH
643 * trims trailing spaces and terminates the resulting string with
644 * null. @len must be actual maximum length (even number) + 1.
645 *
646 * LOCKING:
647 * caller.
648 */
6a62a04d
TH
649void ata_id_c_string(const u16 *id, unsigned char *s,
650 unsigned int ofs, unsigned int len)
0e949ff3
TH
651{
652 unsigned char *p;
653
654 WARN_ON(!(len & 1));
655
6a62a04d 656 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
657
658 p = s + strnlen(s, len - 1);
659 while (p > s && p[-1] == ' ')
660 p--;
661 *p = '\0';
662}
0baab86b 663
2940740b
TH
664static u64 ata_id_n_sectors(const u16 *id)
665{
666 if (ata_id_has_lba(id)) {
667 if (ata_id_has_lba48(id))
668 return ata_id_u64(id, 100);
669 else
670 return ata_id_u32(id, 60);
671 } else {
672 if (ata_id_current_chs_valid(id))
673 return ata_id_u32(id, 57);
674 else
675 return id[1] * id[3] * id[6];
676 }
677}
678
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	for controllers where device selection is not needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
694
0baab86b 695
1da177e4
LT
696/**
697 * ata_std_dev_select - Select device 0/1 on ATA bus
698 * @ap: ATA channel to manipulate
699 * @device: ATA device (numbered from zero) to select
700 *
701 * Use the method defined in the ATA specification to
702 * make either device 0, or device 1, active on the
0baab86b
EF
703 * ATA channel. Works with both PIO and MMIO.
704 *
705 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
706 *
707 * LOCKING:
708 * caller.
709 */
710
711void ata_std_dev_select (struct ata_port *ap, unsigned int device)
712{
713 u8 tmp;
714
715 if (device == 0)
716 tmp = ATA_DEVICE_OBS;
717 else
718 tmp = ATA_DEVICE_OBS | ATA_DEV1;
719
720 if (ap->flags & ATA_FLAG_MMIO) {
721 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
722 } else {
723 outb(tmp, ap->ioaddr.device_addr);
724 }
725 ata_pause(ap); /* needed; also flushes, for mmio */
726}
727
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	/* wait for the channel to go idle before switching devices */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after select */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
764
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  Debug-only (DPRINTK); no effect in production builds.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* words 49/53/63/64/75: capabilities, field validity,
	 * MWDMA modes, PIO modes, queue depth */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* words 80-84: version and command set support */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* word 88: UDMA modes; word 93: hardware reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
803
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/4; PIO0-2 are always implied */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	/* UDMA modes (word 88) are only valid if word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
851
86e45b6b
TH
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function, in jiffies
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	/* EH is flushing; silently drop the request */
	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
886
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* set FLUSH flag under lock so a running task sees it */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* work was already running; flush again to wait it out */
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}
926
/* Completion callback for commands issued by ata_exec_internal():
 * capture the result taskfile from the port, then wake the waiter
 * stashed in qc->private_data.
 */
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	/* read result registers back into qc->tf for the submitter */
	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}
934
/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	err_mask of the completed command (0 on success).
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	/* completion callback wakes us via qc->private_data */
	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		/* issue failed; complete immediately so waiters run */
		ata_qc_complete(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		/* make sure no port task is still fiddling with the qc */
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	/* hand the result taskfile and err_mask back to the caller */
	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	return err_mask;
}
1030
1bc4ccff
AC
1031/**
1032 * ata_pio_need_iordy - check if iordy needed
1033 * @adev: ATA device
1034 *
1035 * Check if the current speed of the device requires IORDY. Used
1036 * by various controllers for chip configuration.
1037 */
1038
1039unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1040{
1041 int pio;
1042 int speed = adev->pio_mode - XFER_PIO_0;
1043
1044 if (speed < 2)
1045 return 0;
1046 if (speed > 2)
1047 return 1;
1048
1049 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1050
1051 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1052 pio = adev->id[ATA_ID_EIDE_PIO];
1053 /* Is the speed faster than the drive allows non IORDY ? */
1054 if (pio) {
1055 /* This is cycle times not frequency - watch the logic! */
1056 if (pio > 240) /* PIO2 is 240nS per cycle */
1057 return 1;
1058 return 0;
1059 }
1060 }
1061 return 0;
1062}
1063
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also takes care of EDD signature
 *	misreporting (to be removed once EDD support is gone) and
 *	issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 *	On success the IDENTIFY buffer is handed to the caller via
 *	@p_id (caller owns and must kfree it); on failure it is freed
 *	here.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;	/* human-readable cause for the failure printk */
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* EDD (EXECUTE DEVICE DIAGNOSTIC) was only the reset method when
	 * the port has neither a new-style probe_reset nor SRST/SATA reset.
	 */
	if (ap->ops->probe_reset ||
	    ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";

		/* anything beyond a plain device error is fatal */
		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	/* IDENTIFY data arrives as little-endian words */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the device must report the class we asked it as */
	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}
1205
4b2f3ede
TH
1206static inline u8 ata_dev_knobble(const struct ata_port *ap,
1207 struct ata_device *dev)
1208{
1209 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1210}
1211
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		/* SCSI layer needs a CDB length; 16 for ATA passthrough */
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	/* host max_cmd_len is the largest CDB any device on it needs */
	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, dev->devno);
	DPRINTK("EXIT, err\n");
	return rc;
}
1362
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	unsigned int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		/* new-style reset: driver fills in the class array */
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
			return rc;
		}
	} else {
		/* old-style reset: classes end up in ap->device[] */
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		/* re-enable port; phy_reset may have disabled it */
		ata_port_probe(ap);
	}

	/* anything still unknown is treated as absent */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_present(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			dev->class++;	/* disable device */
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -1;
}
1445
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing the flag re-enables probing/commands on this port */
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
1461
3be680b7
TH
1462/**
1463 * sata_print_link_status - Print SATA link status
1464 * @ap: SATA port to printk link status about
1465 *
1466 * This function prints link speed and status of a SATA link.
1467 *
1468 * LOCKING:
1469 * None.
1470 */
1471static void sata_print_link_status(struct ata_port *ap)
1472{
1473 u32 sstatus, tmp;
1474 const char *speed;
1475
1476 if (!ap->ops->scr_read)
1477 return;
1478
1479 sstatus = scr_read(ap, SCR_STATUS);
1480
1481 if (sata_dev_present(ap)) {
1482 tmp = (sstatus >> 4) & 0xf;
1483 if (tmp & (1 << 0))
1484 speed = "1.5";
1485 else if (tmp & (1 << 1))
1486 speed = "3.0";
1487 else
1488 speed = "<unknown>";
1489 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1490 ap->id, speed, sstatus);
1491 } else {
1492 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1493 ap->id, sstatus);
1494 }
1495}
1496
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	On exit the port is either probed (device present, BSY cleared,
 *	cable type set to SATA) or disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		/* DET != 1 means either ready (3) or nothing attached (0) */
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	/* wait for the device to come out of BSY; disable port on timeout */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1550
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	/* phy reset may have disabled the port; nothing left to probe */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}
1569
1570/**
780a87f7
JG
1571 * ata_port_disable - Disable port.
1572 * @ap: Port to be disabled.
1da177e4 1573 *
780a87f7
JG
1574 * Modify @ap data structure such that the system
1575 * thinks that the entire port is disabled, and should
1576 * never attempt to probe or communicate with devices
1577 * on this port.
1578 *
1579 * LOCKING: host_set lock, or some other form of
1580 * serialization.
1da177e4
LT
1581 */
1582
1583void ata_port_disable(struct ata_port *ap)
1584{
1585 ap->device[0].class = ATA_DEV_NONE;
1586 ap->device[1].class = ATA_DEV_NONE;
1587 ap->flags |= ATA_FLAG_PORT_DISABLED;
1588}
1589
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

/*	  mode	       setup act8b rec8b cyc8b activ recov cycle  udma */

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }	/* sentinel - terminates ata_timing_find_mode() scans */
};
1634
/* ENOUGH(): nanoseconds -> clock count, rounding up (ceiling division).
 * EZ(): same, but preserves 0 as 0 ("timing field not used").
 */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert a timing (in ns) to counts of the bus clock period T
 * (command cycles) and UT (UDMA cycles), both given in ps — hence
 * the *1000 scaling.  @t and @q may alias.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
1649
/* Merge two timings into @m, taking the worst case (maximum) of each
 * field selected by the @what bitmask.  @m may alias @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
1662
1663static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1664{
1665 const struct ata_timing *t;
1666
1667 for (t = ata_timing; t->mode != speed; t++)
91190758 1668 if (t->mode == 0xFF)
452503f9
AC
1669 return NULL;
1670 return t;
1671}
1672
/* Compute controller timing for @adev at transfer mode @speed.
 * @t receives the result; @T and @UT are the command and UDMA clock
 * periods.  Returns 0 on success, -EINVAL for an unknown mode.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		/* recurse to get the PIO timing, then take the worst case */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
1737
/* Program @dev's transfer mode: issue SET FEATURES - XFER MODE,
 * revalidate the device afterwards, and log the result.  Disables
 * the whole port if revalidation fails.
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	/* re-read IDENTIFY to confirm the device survived the mode change */
	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
}
1761
1762static int ata_host_set_pio(struct ata_port *ap)
1763{
a6d5a51c 1764 int i;
1da177e4
LT
1765
1766 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1767 struct ata_device *dev = &ap->device[i];
a6d5a51c
TH
1768
1769 if (!ata_dev_present(dev))
1770 continue;
1771
1772 if (!dev->pio_mode) {
88f93a31 1773 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
a6d5a51c 1774 return -1;
1da177e4 1775 }
a6d5a51c
TH
1776
1777 dev->xfer_mode = dev->pio_mode;
1778 dev->xfer_shift = ATA_SHIFT_PIO;
1779 if (ap->ops->set_piomode)
1780 ap->ops->set_piomode(ap, dev);
1da177e4
LT
1781 }
1782
1783 return 0;
1784}
1785
a6d5a51c 1786static void ata_host_set_dma(struct ata_port *ap)
1da177e4
LT
1787{
1788 int i;
1789
1790 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1791 struct ata_device *dev = &ap->device[i];
a6d5a51c
TH
1792
1793 if (!ata_dev_present(dev) || !dev->dma_mode)
1794 continue;
1795
1796 dev->xfer_mode = dev->dma_mode;
1797 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1798 if (ap->ops->set_dmamode)
1799 ap->ops->set_dmamode(ap, dev);
1da177e4
LT
1800 }
1801}
1802
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *	Disables the port on any failure.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_present(dev))
			continue;

		ata_dev_xfermask(ap, dev);

		/* TODO: let LLDD filter dev->*_mask here */

		/* pick the best PIO and DMA modes from the masks */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
	}

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	/* step 4 may have disabled the port on revalidation failure */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
1857
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load taskfile registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
1877
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  After @tmout_pat a "slow to respond"
 *	warning is printed; after @tmout the wait is abandoned.
 *
 *	LOCKING: None.
 *
 *	RETURNS:
 *	0 when BSY cleared, 1 on overall timeout.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* phase 1: poll up to the impatience timeout */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	/* phase 2: keep polling up to the overall timeout */
	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
1922
/* Wait for the devices found before the bus reset (bit0/bit1 of
 * @devmask) to come back: BSY must clear, and device 1 must respond
 * to register access with the post-reset signature (nsect/lbal == 1).
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
1969
/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	RETURNS:
 *	Result of ata_busy_sleep(): 0 if BSY cleared, 1 on timeout.
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
2008
/* Perform an ATA software reset (SRST pulse in the device control
 * register), wait the spec-mandated delay, then run post-reset
 * processing for the devices in @devmask.  Returns non-zero on
 * failure (bus reads 0xFF, i.e. nothing is driving D7).
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);


	/* Before we perform post reset processing we want to see if
	   the bus shows 0xFF because the odd clown forgets the D7 pulldown
	   resistor */

	if (ata_check_status(ap) == 0xFF)
		return 1;	/* Positive is failure for some reason */

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2055
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume a device is there */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means device 1 failed diagnostics - skip it */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2156
7a7921e8
TH
2157static int sata_phy_resume(struct ata_port *ap)
2158{
2159 unsigned long timeout = jiffies + (HZ * 5);
2160 u32 sstatus;
2161
2162 scr_write_flush(ap, SCR_CONTROL, 0x300);
2163
2164 /* Wait for phy to become ready, if necessary. */
2165 do {
2166 msleep(200);
2167 sstatus = scr_read(ap, SCR_STATUS);
2168 if ((sstatus & 0xf) != 1)
2169 return 0;
2170 } while (time_before(jiffies, timeout));
2171
2172 return -1;
2173}
2174
8a19ac89
TH
2175/**
2176 * ata_std_probeinit - initialize probing
2177 * @ap: port to be probed
2178 *
2179 * @ap is about to be probed. Initialize it. This function is
2180 * to be used as standard callback for ata_drive_probe_reset().
3a39746a
TH
2181 *
2182 * NOTE!!! Do not use this function as probeinit if a low level
2183 * driver implements only hardreset. Just pass NULL as probeinit
2184 * in that case. Using this function is probably okay but doing
2185 * so makes reset sequence different from the original
2186 * ->phy_reset implementation and Jeff nervous. :-P
8a19ac89
TH
2187 */
2188extern void ata_std_probeinit(struct ata_port *ap)
2189{
3a39746a 2190 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
8a19ac89 2191 sata_phy_resume(ap);
3a39746a
TH
2192 if (sata_dev_present(ap))
2193 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2194 }
8a19ac89
TH
2195}
2196
c2bd5804
TH
2197/**
2198 * ata_std_softreset - reset host port via ATA SRST
2199 * @ap: port to reset
2200 * @verbose: fail verbosely
2201 * @classes: resulting classes of attached devices
2202 *
2203 * Reset host port using ATA SRST. This function is to be used
2204 * as standard callback for ata_drive_*_reset() functions.
2205 *
2206 * LOCKING:
2207 * Kernel thread context (may sleep)
2208 *
2209 * RETURNS:
2210 * 0 on success, -errno otherwise.
2211 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* If this port can read SCRs and the link is offline, there is
	 * nothing to reset — report "no device" and succeed.
	 */
	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		if (verbose)
			printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
			       ap->id, err_mask);
		else
			DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	/* err 0x81 presumably indicates device 1 failed diagnostics;
	 * skip classifying the slave then — TODO confirm
	 */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2256
2257/**
2258 * sata_std_hardreset - reset host port via SATA phy reset
2259 * @ap: port to reset
2260 * @verbose: fail verbosely
2261 * @class: resulting class of attached device
2262 *
2263 * SATA phy-reset host port using DET bits of SControl register.
2264 * This function is to be used as standard callback for
2265 * ata_drive_*_reset().
2266 *
2267 * LOCKING:
2268 * Kernel thread context (may sleep)
2269 *
2270 * RETURNS:
2271 * 0 on success, -errno otherwise.
2272 */
int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
{
	DPRINTK("ENTER\n");

	/* Issue phy wake/reset (SControl DET = 1) */
	scr_write_flush(ap, SCR_CONTROL, 0x301);

	/*
	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* Bring phy back (DET = 0, then poll for link-up) */
	sata_phy_resume(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!sata_dev_present(ap)) {
		/* link offline after COMRESET is not an error */
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait for the device to drop BSY before touching it */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		if (verbose)
			printk(KERN_ERR "ata%u: COMRESET failed "
			       "(device not ready)\n", ap->id);
		else
			DPRINTK("EXIT, device not ready\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	/* only device 0 exists behind a SATA phy reset */
	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
2312
2313/**
2314 * ata_std_postreset - standard postreset callback
2315 * @ap: the target ata_port
2316 * @classes: classes of attached devices
2317 *
2318 * This function is invoked after a successful reset. Note that
2319 * the device might have been reset more than once using
2320 * different reset methods before postreset is invoked.
c2bd5804
TH
2321 *
2322 * This function is to be used as standard callback for
2323 * ata_drive_*_reset().
2324 *
2325 * LOCKING:
2326 * Kernel thread context (may sleep)
2327 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	DPRINTK("ENTER\n");

	/* set cable type if it isn't already set */
	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* print link status */
	if (ap->cbl == ATA_CBL_SATA)
		sata_print_link_status(ap);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	/* NOTE(review): the index pairing here (classes[0] -> select
	 * device 1, classes[1] -> select device 0) is the mirror image
	 * of what ata_bus_reset() does — confirm which pairing is
	 * intended before changing either.
	 */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control (restore ap->ctl, e.g. nIEN state) */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		else
			outb(ap->ctl, ap->ioaddr.ctl_addr);
	}

	DPRINTK("EXIT\n");
}
2366
2367/**
2368 * ata_std_probe_reset - standard probe reset method
2369 * @ap: prot to perform probe-reset
2370 * @classes: resulting classes of attached devices
2371 *
2372 * The stock off-the-shelf ->probe_reset method.
2373 *
2374 * LOCKING:
2375 * Kernel thread context (may sleep)
2376 *
2377 * RETURNS:
2378 * 0 on success, -errno otherwise.
2379 */
2380int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2381{
2382 ata_reset_fn_t hardreset;
2383
2384 hardreset = NULL;
b911fc3a 2385 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
c2bd5804
TH
2386 hardreset = sata_std_hardreset;
2387
8a19ac89 2388 return ata_drive_probe_reset(ap, ata_std_probeinit,
7944ea95 2389 ata_std_softreset, hardreset,
c2bd5804
TH
2390 ata_std_postreset, classes);
2391}
2392
a62c0fc5
TH
2393static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2394 ata_postreset_fn_t postreset,
2395 unsigned int *classes)
2396{
2397 int i, rc;
2398
2399 for (i = 0; i < ATA_MAX_DEVICES; i++)
2400 classes[i] = ATA_DEV_UNKNOWN;
2401
2402 rc = reset(ap, 0, classes);
2403 if (rc)
2404 return rc;
2405
2406 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2407 * is complete and convert all ATA_DEV_UNKNOWN to
2408 * ATA_DEV_NONE.
2409 */
2410 for (i = 0; i < ATA_MAX_DEVICES; i++)
2411 if (classes[i] != ATA_DEV_UNKNOWN)
2412 break;
2413
2414 if (i < ATA_MAX_DEVICES)
2415 for (i = 0; i < ATA_MAX_DEVICES; i++)
2416 if (classes[i] == ATA_DEV_UNKNOWN)
2417 classes[i] = ATA_DEV_NONE;
2418
2419 if (postreset)
2420 postreset(ap, classes);
2421
2422 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2423}
2424
2425/**
2426 * ata_drive_probe_reset - Perform probe reset with given methods
2427 * @ap: port to reset
7944ea95 2428 * @probeinit: probeinit method (can be NULL)
a62c0fc5
TH
2429 * @softreset: softreset method (can be NULL)
2430 * @hardreset: hardreset method (can be NULL)
2431 * @postreset: postreset method (can be NULL)
2432 * @classes: resulting classes of attached devices
2433 *
2434 * Reset the specified port and classify attached devices using
2435 * given methods. This function prefers softreset but tries all
2436 * possible reset sequences to reset and classify devices. This
2437 * function is intended to be used for constructing ->probe_reset
2438 * callback by low level drivers.
2439 *
2440 * Reset methods should follow the following rules.
2441 *
2442 * - Return 0 on sucess, -errno on failure.
2443 * - If classification is supported, fill classes[] with
2444 * recognized class codes.
2445 * - If classification is not supported, leave classes[] alone.
2446 * - If verbose is non-zero, print error message on failure;
2447 * otherwise, shut up.
2448 *
2449 * LOCKING:
2450 * Kernel thread context (may sleep)
2451 *
2452 * RETURNS:
2453 * 0 on success, -EINVAL if no reset method is avaliable, -ENODEV
2454 * if classification fails, and any error code from reset
2455 * methods.
2456 */
7944ea95 2457int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
a62c0fc5
TH
2458 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2459 ata_postreset_fn_t postreset, unsigned int *classes)
2460{
2461 int rc = -EINVAL;
2462
7944ea95
TH
2463 if (probeinit)
2464 probeinit(ap);
2465
a62c0fc5
TH
2466 if (softreset) {
2467 rc = do_probe_reset(ap, softreset, postreset, classes);
2468 if (rc == 0)
2469 return 0;
2470 }
2471
2472 if (!hardreset)
2473 return rc;
2474
2475 rc = do_probe_reset(ap, hardreset, postreset, classes);
2476 if (rc == 0 || rc != -ENODEV)
2477 return rc;
2478
2479 if (softreset)
2480 rc = do_probe_reset(ap, softreset, postreset, classes);
2481
2482 return rc;
2483}
2484
623a3128
TH
2485/**
2486 * ata_dev_same_device - Determine whether new ID matches configured device
2487 * @ap: port on which the device to compare against resides
2488 * @dev: device to compare against
2489 * @new_class: class of the new device
2490 * @new_id: IDENTIFY page of the new device
2491 *
2492 * Compare @new_class and @new_id against @dev and determine
2493 * whether @dev is the device indicated by @new_class and
2494 * @new_id.
2495 *
2496 * LOCKING:
2497 * None.
2498 *
2499 * RETURNS:
2500 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2501 */
static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
			       unsigned int new_class, const u16 *new_id)
{
	const u16 *old_id = dev->id;
	/* 40-char model + NUL, 20-char serial + NUL (ATA ID string sizes) */
	unsigned char model[2][41], serial[2][21];
	u64 new_n_sectors;

	/* class must match first — everything else is meaningless
	 * across an ATA/ATAPI change
	 */
	if (dev->class != new_class) {
		printk(KERN_INFO
		       "ata%u: dev %u class mismatch %d != %d\n",
		       ap->id, dev->devno, dev->class, new_class);
		return 0;
	}

	/* extract comparable C strings from old and new IDENTIFY data */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		printk(KERN_INFO
		       "ata%u: dev %u model number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		printk(KERN_INFO
		       "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, serial[0], serial[1]);
		return 0;
	}

	/* capacity check only makes sense for ATA disks */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		printk(KERN_INFO
		       "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
		       ap->id, dev->devno, (unsigned long long)dev->n_sectors,
		       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}
2546
2547/**
2548 * ata_dev_revalidate - Revalidate ATA device
2549 * @ap: port on which the device to revalidate resides
2550 * @dev: device to revalidate
2551 * @post_reset: is this revalidation after reset?
2552 *
2553 * Re-read IDENTIFY page and make sure @dev is still attached to
2554 * the port.
2555 *
2556 * LOCKING:
2557 * Kernel thread context (may sleep)
2558 *
2559 * RETURNS:
2560 * 0 on success, negative errno otherwise
2561 */
int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
		       int post_reset)
{
	unsigned int class;
	u16 *id;
	int rc;

	if (!ata_dev_present(dev))
		return -ENODEV;

	class = dev->class;
	id = NULL;

	/* allocate & read ID data (ata_dev_read_id allocates *id;
	 * ownership passes to us on success)
	 */
	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(ap, dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* swap in the fresh IDENTIFY page */
	kfree(dev->id);
	dev->id = id;

	/* configure device according to the new ID */
	return ata_dev_configure(ap, dev, 0);

 fail:
	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
	       ap->id, dev->devno, rc);
	kfree(id);	/* kfree(NULL) is a no-op on the early-failure path */
	return rc;
}
2598
/* Devices for which (multiword/UDMA) DMA must be disabled, stored as
 * { model number, firmware revision } pairs.  A NULL revision entry
 * blacklists every revision of that model.
 */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B",  NULL,
	"CRD-8400B", NULL,
	"CRD-8480B", NULL,
	"CRD-8482B", NULL,
	"CRD-84", NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"CD-532E-A", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
};
f4b15fef
AC
2631
/* Trim the trailing blank padding from an ATA ID string in place and
 * return the trimmed length (ATAPI specifies empty space is
 * blank-filled).
 */
static int ata_strim(char *s, size_t len)
{
	size_t n = strnlen(s, len);

	while (n > 0 && s[n - 1] == ' ')
		s[--n] = '\0';
	return n;
}
1da177e4 2643
057ace5e 2644static int ata_dma_blacklisted(const struct ata_device *dev)
1da177e4 2645{
f4b15fef
AC
2646 unsigned char model_num[40];
2647 unsigned char model_rev[16];
2648 unsigned int nlen, rlen;
1da177e4
LT
2649 int i;
2650
f4b15fef
AC
2651 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2652 sizeof(model_num));
2653 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2654 sizeof(model_rev));
2655 nlen = ata_strim(model_num, sizeof(model_num));
2656 rlen = ata_strim(model_rev, sizeof(model_rev));
1da177e4 2657
f4b15fef
AC
2658 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2659 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2660 if (ata_dma_blacklist[i+1] == NULL)
2661 return 1;
2662 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2663 return 1;
2664 }
2665 }
1da177e4
LT
2666 return 0;
2667}
2668
a6d5a51c
TH
2669/**
2670 * ata_dev_xfermask - Compute supported xfermask of the given device
2671 * @ap: Port on which the device to compute xfermask for resides
2672 * @dev: Device to compute xfermask for
2673 *
acf356b1
TH
2674 * Compute supported xfermask of @dev and store it in
2675 * dev->*_mask. This function is responsible for applying all
2676 * known limits including host controller limits, device
2677 * blacklist, etc...
a6d5a51c
TH
2678 *
2679 * LOCKING:
2680 * None.
a6d5a51c 2681 */
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
{
	unsigned long xfer_mask;
	int i;

	/* start from the host controller's capabilities */
	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
				      ap->udma_mask);

	/* use port-wide xfermask for now: AND in every present
	 * device's capabilities and blacklist status, not just @dev's
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *d = &ap->device[i];
		if (!ata_dev_present(d))
			continue;
		xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
					       d->udma_mask);
		xfer_mask &= ata_id_xfermask(d->id);
		if (ata_dma_blacklisted(d))
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	}

	if (ata_dma_blacklisted(dev))
		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
		       "disabling DMA\n", ap->id, dev->devno);

	/* write the combined result back into @dev's masks */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);
}
2709
1da177e4
LT
2710/**
2711 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2712 * @ap: Port associated with device @dev
2713 * @dev: Device to which command will be sent
2714 *
780a87f7
JG
2715 * Issue SET FEATURES - XFER MODE command to device @dev
2716 * on port @ap.
2717 *
1da177e4 2718 * LOCKING:
0cba632b 2719 * PCI/etc. bus probe sem.
1da177e4
LT
2720 */
2721
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
	struct ata_taskfile tf;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;	/* sector count carries the mode ID */

	/* a failed SET FEATURES leaves the device in an unknown
	 * transfer mode — disable the whole port rather than risk
	 * corrupting transfers
	 */
	if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
		printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
		       ap->id);
		ata_port_disable(ap);
	}

	DPRINTK("EXIT\n");
}
2744
8bf62ece
AL
2745/**
2746 * ata_dev_init_params - Issue INIT DEV PARAMS command
2747 * @ap: Port associated with device @dev
2748 * @dev: Device to which command will be sent
2749 *
2750 * LOCKING:
6aff8f1f
TH
2751 * Kernel thread context (may sleep)
2752 *
2753 * RETURNS:
2754 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece
AL
2755 */
2756
6aff8f1f
TH
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	/* CHS geometry from the IDENTIFY page: word 6 = sectors/track,
	 * word 3 = number of heads
	 */
	u16 sectors = dev->id[6];
	u16 heads = dev->id[3];

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return 0;	/* bogus geometry: silently skip, not an error */

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
2784
1da177e4 2785/**
0cba632b
JG
2786 * ata_sg_clean - Unmap DMA memory associated with command
2787 * @qc: Command containing DMA memory to be released
2788 *
2789 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
2790 *
2791 * LOCKING:
0cba632b 2792 * spin_lock_irqsave(host_set lock)
1da177e4
LT
2793 */
2794
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may be 0 if the last sg was fully consumed by
		 * the pad buffer at setup time
		 */
		if (qc->n_elem)
			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		/* restore last sg (undo the trim done in ata_sg_setup) */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->host_set->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg, then copy the padded tail back into the
		 * caller's buffer
		 */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
2843
2844/**
2845 * ata_fill_sg - Fill PCI IDE PRD table
2846 * @qc: Metadata associated with taskfile to be transferred
2847 *
780a87f7
JG
2848 * Fill PCI IDE PRD (scatter-gather) table with segments
2849 * associated with the current disk command.
2850 *
1da177e4 2851 * LOCKING:
780a87f7 2852 * spin_lock_irqsave(host_set lock)
1da177e4
LT
2853 *
2854 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg element into PRD entries that never
		 * cross a 64K physical boundary
		 */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table for the controller */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2895/**
2896 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2897 * @qc: Metadata associated with taskfile to check
2898 *
780a87f7
JG
2899 * Allow low-level driver to filter ATA PACKET commands, returning
2900 * a status indicating whether or not it is OK to use DMA for the
2901 * supplied PACKET command.
2902 *
1da177e4 2903 * LOCKING:
0cba632b
JG
2904 * spin_lock_irqsave(host_set lock)
2905 *
1da177e4
LT
2906 * RETURNS: 0 when ATAPI DMA can be used
2907 * nonzero otherwise
2908 */
2909int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2910{
2911 struct ata_port *ap = qc->ap;
2912 int rc = 0; /* Assume ATAPI DMA is OK by default */
2913
2914 if (ap->ops->check_atapi_dma)
2915 rc = ap->ops->check_atapi_dma(qc);
2916
2917 return rc;
2918}
2919/**
2920 * ata_qc_prep - Prepare taskfile for submission
2921 * @qc: Metadata associated with taskfile to be prepared
2922 *
780a87f7
JG
2923 * Prepare ATA taskfile for submission.
2924 *
1da177e4
LT
2925 * LOCKING:
2926 * spin_lock_irqsave(host_set lock)
2927 */
2928void ata_qc_prep(struct ata_queued_cmd *qc)
2929{
2930 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2931 return;
2932
2933 ata_fill_sg(qc);
2934}
2935
e46834cd
BK
/* ->qc_prep stub for controllers that need no PRD/DMA preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2937
0cba632b
JG
2938/**
2939 * ata_sg_init_one - Associate command with memory buffer
2940 * @qc: Command to be associated
2941 * @buf: Memory buffer
2942 * @buflen: Length of memory buffer, in bytes.
2943 *
2944 * Initialize the data-related elements of queued_cmd @qc
2945 * to point to a single memory buffer, @buf of byte length @buflen.
2946 *
2947 * LOCKING:
2948 * spin_lock_irqsave(host_set lock)
2949 */
2950
1da177e4
LT
2951void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2952{
2953 struct scatterlist *sg;
2954
2955 qc->flags |= ATA_QCFLAG_SINGLE;
2956
2957 memset(&qc->sgent, 0, sizeof(qc->sgent));
cedc9a47 2958 qc->__sg = &qc->sgent;
1da177e4 2959 qc->n_elem = 1;
cedc9a47 2960 qc->orig_n_elem = 1;
1da177e4
LT
2961 qc->buf_virt = buf;
2962
cedc9a47 2963 sg = qc->__sg;
f0612bbc 2964 sg_init_one(sg, buf, buflen);
1da177e4
LT
2965}
2966
0cba632b
JG
2967/**
2968 * ata_sg_init - Associate command with scatter-gather table.
2969 * @qc: Command to be associated
2970 * @sg: Scatter-gather table.
2971 * @n_elem: Number of elements in s/g table.
2972 *
2973 * Initialize the data-related elements of queued_cmd @qc
2974 * to point to a scatter-gather table @sg, containing @n_elem
2975 * elements.
2976 *
2977 * LOCKING:
2978 * spin_lock_irqsave(host_set lock)
2979 */
2980
1da177e4
LT
2981void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2982 unsigned int n_elem)
2983{
2984 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 2985 qc->__sg = sg;
1da177e4 2986 qc->n_elem = n_elem;
cedc9a47 2987 qc->orig_n_elem = n_elem;
1da177e4
LT
2988}
2989
2990/**
0cba632b
JG
2991 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2992 * @qc: Command with memory buffer to be mapped.
2993 *
2994 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
2995 *
2996 * LOCKING:
2997 * spin_lock_irqsave(host_set lock)
2998 *
2999 * RETURNS:
0cba632b 3000 * Zero on success, negative on error.
1da177e4
LT
3001 */
3002
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* each tag owns one ATA_DMA_PAD_SZ slot in ap->pad */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI transfers can be non-multiple-of-4 */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-fill the pad buffer with the tail of
		 * the caller's data; for reads, ata_sg_clean copies it
		 * back afterwards
		 */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* if the pad buffer consumed the whole (tiny) transfer, there
	 * is nothing left to map
	 */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3058
3059/**
0cba632b
JG
3060 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3061 * @qc: Command with scatter-gather table to be mapped.
3062 *
3063 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3064 *
3065 * LOCKING:
3066 * spin_lock_irqsave(host_set lock)
3067 *
3068 * RETURNS:
0cba632b 3069 * Zero on success, negative on error.
1da177e4
LT
3070 *
3071 */
3072
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];	/* last sg entry */
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* each tag owns one ATA_DMA_PAD_SZ slot in ap->pad */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers can be non-multiple-of-4 */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last entry from the mapping if the pad buffer
	 * consumed it entirely
	 */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	/* record the mapped count; ata_sg_clean uses it for unmap */
	qc->n_elem = n_elem;

	return 0;
}
3143
40e8c82c
TH
3144/**
3145 * ata_poll_qc_complete - turn irq back on and finish qc
3146 * @qc: Command to complete
8e8b77dd 3147 * @err_mask: ATA status register content
40e8c82c
TH
3148 *
3149 * LOCKING:
3150 * None. (grabs host lock)
3151 */
3152
void ata_poll_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	/* completion must run under the host_set lock: clear the
	 * polling flag, turn the port interrupt back on, then finish
	 * the command
	 */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_NOINTR;
	ata_irq_on(ap);
	ata_qc_complete(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
3164
/**
 *	ata_pio_poll - poll using PIO, depending on current state
 *	@ap: the target ata_port
 *
 *	Check device status while in one of the polling HSM states.
 *	If the device is still busy, either re-arm the poll (returning
 *	a pause interval) or, past the deadline, flag a timeout error.
 *	When BSY clears, advance to the corresponding non-poll state.
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 *
 *	RETURNS:
 *	timeout value to use
 */

static unsigned long ata_pio_poll(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;
	unsigned int poll_state = HSM_ST_UNKNOWN;
	unsigned int reg_state = HSM_ST_UNKNOWN;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* map the current state to its (poll, regular) state pair */
	switch (ap->hsm_task_state) {
	case HSM_ST:
	case HSM_ST_POLL:
		poll_state = HSM_ST_POLL;
		reg_state = HSM_ST;
		break;
	case HSM_ST_LAST:
	case HSM_ST_LAST_POLL:
		poll_state = HSM_ST_LAST_POLL;
		reg_state = HSM_ST_LAST;
		break;
	default:
		BUG();
		break;
	}

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		if (time_after(jiffies, ap->pio_task_timeout)) {
			/* deadline expired while device still busy */
			qc->err_mask |= AC_ERR_TIMEOUT;
			ap->hsm_task_state = HSM_ST_TMOUT;
			return 0;
		}
		/* still busy but within the deadline: keep polling */
		ap->hsm_task_state = poll_state;
		return ATA_SHORT_PAUSE;
	}

	/* BSY cleared: resume normal state-machine processing */
	ap->hsm_task_state = reg_state;
	return 0;
}
3216
/**
 *	ata_pio_complete - check if drive is busy or idle
 *	@ap: the target ata_port
 *
 *	Final step of a polled PIO command: wait for the device to go
 *	idle, fold any error status into the qc, and complete it.
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 *
 *	RETURNS:
 *	Non-zero if qc completed, zero otherwise.
 */

static int ata_pio_complete (struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 drv_stat;

	/*
	 * This is purely heuristic.  This is a fast path.  Sometimes when
	 * we enter, BSY will be cleared in a chk-status or two.  If not,
	 * the drive is probably seeking or something.  Snooze for a couple
	 * msecs, then chk-status again.  If still busy, fall back to
	 * HSM_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
	if (drv_stat & ATA_BUSY) {
		msleep(2);
		drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
		if (drv_stat & ATA_BUSY) {
			/* still busy: hand off to the slow polling path */
			ap->hsm_task_state = HSM_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return 0;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		/* device reported an error; defer to the error path */
		qc->err_mask |= __ac_err_mask(drv_stat);
		ap->hsm_task_state = HSM_ST_ERR;
		return 0;
	}

	ap->hsm_task_state = HSM_ST_IDLE;

	WARN_ON(qc->err_mask);
	ata_poll_qc_complete(qc);

	/* another command may start at this point */

	return 1;
}
3270
0baab86b
EF
3271
3272/**
c893a3ae 3273 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3274 * @buf: Buffer to swap
3275 * @buf_words: Number of 16-bit words in buffer.
3276 *
3277 * Swap halves of 16-bit words if needed to convert from
3278 * little-endian byte order to native cpu byte order, or
3279 * vice-versa.
3280 *
3281 * LOCKING:
6f0ef4fa 3282 * Inherited from caller.
0baab86b 3283 */
1da177e4
LT
3284void swap_buf_le16(u16 *buf, unsigned int buf_words)
3285{
3286#ifdef __BIG_ENDIAN
3287 unsigned int i;
3288
3289 for (i = 0; i < buf_words; i++)
3290 buf[i] = le16_to_cpu(buf[i]);
3291#endif /* __BIG_ENDIAN */
3292}
3293
6ae4cfb5
AL
/**
 *	ata_mmio_data_xfer - Transfer data by MMIO
 *	@ap: port to read/write
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by MMIO.
 *	Data moves 16 bits at a time through the port's data register;
 *	an odd trailing byte is handled via a bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
{
	unsigned int i;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	/* Transfer multiple of 2 bytes */
	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* pad the last byte out to a full 16-bit word */
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), mmio);
		} else {
			/* read a full word, keep only the first byte */
			align_buf[0] = cpu_to_le16(readw(mmio));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3338
6ae4cfb5
AL
/**
 *	ata_pio_data_xfer - Transfer data by PIO
 *	@ap: port to read/write
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO
 *	(x86-style port I/O), 16 bits at a time; an odd trailing
 *	byte is handled via a bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
			      unsigned int buflen, int write_data)
{
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		outsw(ap->ioaddr.data_addr, buf, words);
	else
		insw(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* pad the last byte out to a full 16-bit word */
			memcpy(align_buf, trailing_buf, 1);
			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			/* read a full word, keep only the first byte */
			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3377
6ae4cfb5
AL
3378/**
3379 * ata_data_xfer - Transfer data from/to the data register.
3380 * @ap: port to read/write
3381 * @buf: data buffer
3382 * @buflen: buffer length
3383 * @do_write: read/write
3384 *
3385 * Transfer data from/to the device data register.
3386 *
3387 * LOCKING:
3388 * Inherited from caller.
6ae4cfb5
AL
3389 */
3390
1da177e4
LT
3391static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3392 unsigned int buflen, int do_write)
3393{
a1bd9e68
AC
3394 /* Make the crap hardware pay the costs not the good stuff */
3395 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3396 unsigned long flags;
3397 local_irq_save(flags);
3398 if (ap->flags & ATA_FLAG_MMIO)
3399 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3400 else
3401 ata_pio_data_xfer(ap, buf, buflen, do_write);
3402 local_irq_restore(flags);
3403 } else {
3404 if (ap->flags & ATA_FLAG_MMIO)
3405 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3406 else
3407 ata_pio_data_xfer(ap, buf, buflen, do_write);
3408 }
1da177e4
LT
3409}
3410
6ae4cfb5
AL
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device,
 *	advancing the qc's sector and scatterlist cursors.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector; move the HSM to its last state */
	if (qc->cursect == (qc->nsect - 1))
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* kmap is needed since the sg page may be in highmem */
	buf = kmap(page) + offset;

	qc->cursect++;
	qc->cursg_ofs++;

	/* current sg entry exhausted: advance to the next one */
	if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);

	kunmap(page);
}
3458
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer @bytes of data from/to the ATAPI device, walking the
 *	qc's scatterlist.  If the device requests more data than the
 *	scatterlist holds, the excess is drained (read) or zero-padded
 *	(write) so the byte count programmed in the device is honored.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk satisfies the total byte count: last HSM step next */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
			       ap->id, bytes);

		for (i = 0; i < words; i++)
			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	/* kmap is needed since the sg page may be in highmem */
	buf = kmap(page) + offset;

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current sg entry exhausted: advance to the next one */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);

	kunmap(page);

	/* more bytes requested than this sg/page chunk could hold */
	if (bytes)
		goto next_sg;
}
3543
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt-reason registers from the
 *	device, sanity-check them against the expected transfer
 *	direction, and perform the transfer via __atapi_pio_bytes().
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* byte count is reported in the LBA mid/high registers,
	 * the interrupt reason in the sector count register
	 */
	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	      ap->id, dev->devno);
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
3586
/**
 *	ata_pio_block - start PIO on a block
 *	@ap: the target ata_port
 *
 *	Transfer one block of a polled PIO command: wait briefly for
 *	BSY to clear (falling back to HSM_ST_POLL if it does not),
 *	check for device errors, then move data for either the ATAPI
 *	or the plain ATA case.
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 */

static void ata_pio_block(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * HSM_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ap->hsm_task_state = HSM_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* check error */
	if (status & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		ap->hsm_task_state = HSM_ST_ERR;
		return;
	}

	/* transfer data if any */
	if (is_atapi_taskfile(&qc->tf)) {
		/* DRQ=0 means no more data to transfer */
		if ((status & ATA_DRQ) == 0) {
			ap->hsm_task_state = HSM_ST_LAST;
			return;
		}

		atapi_pio_bytes(qc);
	} else {
		/* handle BSY=0, DRQ=0 as error */
		if ((status & ATA_DRQ) == 0) {
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			return;
		}

		ata_pio_sector(qc);
	}
}
3649
/**
 *	ata_pio_error - handle HSM_ST_ERR / HSM_ST_TMOUT for a PIO command
 *	@ap: the target ata_port
 *
 *	Terminal error state of the PIO state machine: report the
 *	failure (except for ATAPI PACKET, where request sense will
 *	follow), reset the HSM and complete the qc with its err_mask.
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 */
static void ata_pio_error(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* ATAPI errors are reported via request sense, not printk */
	if (qc->tf.command != ATA_CMD_PACKET)
		printk(KERN_WARNING "ata%u: PIO error\n", ap->id);

	/* make sure qc->err_mask is available to
	 * know what's wrong and recover
	 */
	WARN_ON(qc->err_mask == 0);

	ap->hsm_task_state = HSM_ST_IDLE;

	ata_poll_qc_complete(qc);
}
3669
/**
 *	ata_pio_task - workqueue entry point of the polled PIO state machine
 *	@_data: the target ata_port
 *
 *	Dispatch on ap->hsm_task_state.  The loop re-enters immediately
 *	(fsm_start) while work remains, requeues itself with a delay
 *	when a poll state asks for a pause, and exits once the qc
 *	completes or the machine goes idle.
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 */
static void ata_pio_task(void *_data)
{
	struct ata_port *ap = _data;
	unsigned long timeout;
	int qc_completed;

fsm_start:
	timeout = 0;
	qc_completed = 0;

	switch (ap->hsm_task_state) {
	case HSM_ST_IDLE:
		return;

	case HSM_ST:
		ata_pio_block(ap);
		break;

	case HSM_ST_LAST:
		qc_completed = ata_pio_complete(ap);
		break;

	case HSM_ST_POLL:
	case HSM_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);
		break;

	case HSM_ST_TMOUT:
	case HSM_ST_ERR:
		ata_pio_error(ap);
		return;
	}

	if (timeout)
		/* poll state asked for a pause: requeue ourselves */
		ata_port_queue_task(ap, ata_pio_task, ap, timeout);
	else if (!qc_completed)
		goto fsm_start;
}
3708
8061f5f0
TH
/**
 *	atapi_packet_task - Write CDB bytes to hardware
 *	@_data: Port to which ATAPI device is attached.
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *	If DMA is to be performed, exit immediately.
 *	Otherwise, we are in polling mode, so poll
 *	status until operation succeeds or fails.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */

static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		goto err_out;
	}

	/* make sure DRQ is set */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
		qc->err_mask |= AC_ERR_HSM;
		goto err_out;
	}

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		unsigned long flags;

		/* Once we're done issuing command and kicking bmdma,
		 * irq handler takes over.  To not lose irq, we need
		 * to clear NOINTR flag before sending cdb, but
		 * interrupt handler shouldn't be invoked before we're
		 * finished.  Hence, the following locking.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		ap->flags &= ~ATA_FLAG_NOINTR;
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
			ap->ops->bmdma_start(qc);	/* initiate bmdma */
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	} else {
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);

		/* PIO commands are handled by polling */
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
	}

	return;

err_out:
	ata_poll_qc_complete(qc);
}
3780
1da177e4
LT
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */

static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* stop the polled-PIO state machine before touching the port */
	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(&host_set->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	/* hand the qc to the error handler for completion/retry */
	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
3845
/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */

void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* delegate to the per-qc timeout handler for the active command */
	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

	DPRINTK("EXIT\n");
}
3873
3874/**
3875 * ata_qc_new - Request an available ATA command, for queueing
3876 * @ap: Port associated with device @dev
3877 * @dev: Device from whom we request an available command structure
3878 *
3879 * LOCKING:
0cba632b 3880 * None.
1da177e4
LT
3881 */
3882
3883static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3884{
3885 struct ata_queued_cmd *qc = NULL;
3886 unsigned int i;
3887
3888 for (i = 0; i < ATA_MAX_QUEUE; i++)
3889 if (!test_and_set_bit(i, &ap->qactive)) {
3890 qc = ata_qc_from_tag(ap, i);
3891 break;
3892 }
3893
3894 if (qc)
3895 qc->tag = i;
3896
3897 return qc;
3898}
3899
3900/**
3901 * ata_qc_new_init - Request an available ATA command, and initialize it
3902 * @ap: Port associated with device @dev
3903 * @dev: Device from whom we request an available command structure
3904 *
3905 * LOCKING:
0cba632b 3906 * None.
1da177e4
LT
3907 */
3908
3909struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3910 struct ata_device *dev)
3911{
3912 struct ata_queued_cmd *qc;
3913
3914 qc = ata_qc_new(ap);
3915 if (qc) {
1da177e4
LT
3916 qc->scsicmd = NULL;
3917 qc->ap = ap;
3918 qc->dev = dev;
1da177e4 3919
2c13b7ce 3920 ata_qc_reinit(qc);
1da177e4
LT
3921 }
3922
3923 return qc;
3924}
3925
1da177e4
LT
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		if (tag == ap->active_tag)
			ap->active_tag = ATA_TAG_POISON;
		/* poison the tag so stale use of this qc is detectable */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qactive);
	}
}
3952
/**
 *	__ata_qc_complete - core completion path for a queued command
 *	@qc: Command to complete
 *
 *	Unmap DMA buffers if mapped, clear the ACTIVE flag, and
 *	invoke the command's completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;

	/* call completion callback */
	qc->complete_fn(qc);
}
3970
3971static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3972{
3973 struct ata_port *ap = qc->ap;
3974
3975 switch (qc->tf.protocol) {
3976 case ATA_PROT_DMA:
3977 case ATA_PROT_ATAPI_DMA:
3978 return 1;
3979
3980 case ATA_PROT_ATAPI:
3981 case ATA_PROT_PIO:
1da177e4
LT
3982 if (ap->flags & ATA_FLAG_PIO_DMA)
3983 return 1;
3984
3985 /* fall through */
3986
3987 default:
3988 return 0;
3989 }
3990
3991 /* never reached */
3992}
3993
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ata_should_dma_map(qc)) {
		/* map either a full scatterlist or a single buffer */
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->ap->active_tag = qc->tag;
	qc->flags |= ATA_QCFLAG_ACTIVE;

	return ap->ops->qc_issue(qc);

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	return AC_ERR_SYSTEM;
}
4037
0baab86b 4038
1da177e4
LT
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_tf_to_host(ap, &qc->tf);
		break;

	case ATA_PROT_DMA:
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		break;

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
		break;

	case ATA_PROT_ATAPI:
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		/* CDB is written from kernel thread context */
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_NODATA:
		/* suppress irq handling until the CDB has been sent */
		ap->flags |= ATA_FLAG_NOINTR;
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		/* suppress irq handling until the CDB has been sent */
		ap->flags |= ATA_FLAG_NOINTR;
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
4107
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	u8 status, host_stat;

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			goto idle_irq;

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus; reading it does not clear INTRQ */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)
			goto idle_irq;

		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
			goto idle_irq;
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		break;

	default:
		goto idle_irq;
	}

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
4187
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host_set information structure
 *	@regs: unused
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host_set lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		/* skip disabled ports and ports polling with irqs masked */
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			/* only service commands that expect an interrupt */
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
4233
0baab86b 4234
9b847548
JA
/*
 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
 * without filling any other registers.
 *
 * Issued synchronously via ata_exec_internal(); returns its error
 * code (zero on success) and logs a failure.
 */
static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
			     u8 cmd)
{
	struct ata_taskfile tf;
	int err;

	ata_tf_init(ap, &tf, dev->devno);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
	if (err)
		printk(KERN_ERR "%s: ata command failed: %d\n",
		       __FUNCTION__, err);

	return err;
}
4258
4259static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4260{
4261 u8 cmd;
4262
4263 if (!ata_try_flush_cache(dev))
4264 return 0;
4265
4266 if (ata_id_has_flush_ext(dev->id))
4267 cmd = ATA_CMD_FLUSH_EXT;
4268 else
4269 cmd = ATA_CMD_FLUSH;
4270
4271 return ata_do_simple_cmd(ap, dev, cmd);
4272}
4273
/* Put the drive into standby (spin down) via STANDBY IMMEDIATE. */
static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
}
4278
/* Wake the drive back up via IDLE IMMEDIATE. */
static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
}
4283
4284/**
4285 * ata_device_resume - wakeup a previously suspended devices
c893a3ae
RD
4286 * @ap: port the device is connected to
4287 * @dev: the device to resume
9b847548
JA
4288 *
4289 * Kick the drive back into action, by sending it an idle immediate
4290 * command and making sure its transfer mode matches between drive
4291 * and host.
4292 *
4293 */
4294int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4295{
4296 if (ap->flags & ATA_FLAG_SUSPENDED) {
4297 ap->flags &= ~ATA_FLAG_SUSPENDED;
4298 ata_set_mode(ap);
4299 }
4300 if (!ata_dev_present(dev))
4301 return 0;
4302 if (dev->class == ATA_DEV_ATA)
4303 ata_start_drive(ap, dev);
4304
4305 return 0;
4306}
4307
/**
 *	ata_device_suspend - prepare a device for suspend
 *	@ap: port the device is connected to
 *	@dev: the device to suspend
 *
 *	Flush the cache on the drive, if appropriate, then issue a
 *	standbynow command.  Marks the port ATA_FLAG_SUSPENDED so that
 *	ata_device_resume() knows to re-establish the transfer mode.
 */
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
{
	/* nothing to do for an absent device; note the port is NOT
	 * marked suspended in that case
	 */
	if (!ata_dev_present(dev))
		return 0;
	/* only ATA (non-ATAPI) devices have a cache to flush */
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(ap, dev);

	ata_standby_drive(ap, dev);
	ap->flags |= ATA_FLAG_SUSPENDED;
	return 0;
}
4327
c893a3ae
RD
4328/**
4329 * ata_port_start - Set port up for dma.
4330 * @ap: Port to initialize
4331 *
4332 * Called just after data structures for each port are
4333 * initialized. Allocates space for PRD table.
4334 *
4335 * May be used as the port_start() entry in ata_port_operations.
4336 *
4337 * LOCKING:
4338 * Inherited from caller.
4339 */
4340
1da177e4
LT
4341int ata_port_start (struct ata_port *ap)
4342{
4343 struct device *dev = ap->host_set->dev;
6037d6bb 4344 int rc;
1da177e4
LT
4345
4346 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4347 if (!ap->prd)
4348 return -ENOMEM;
4349
6037d6bb
JG
4350 rc = ata_pad_alloc(ap, dev);
4351 if (rc) {
cedc9a47 4352 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
6037d6bb 4353 return rc;
cedc9a47
JG
4354 }
4355
1da177e4
LT
4356 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4357
4358 return 0;
4359}
4360
0baab86b
EF
4361
/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table and the DMA pad buffer.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
4381
aa8f0dc6
JG
/* Default host_stop: unmap the shared MMIO region, if one was mapped. */
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
4387
4388
1da177e4
LT
/**
 *	ata_host_remove - Unregister SCSI host structure with upper layers
 *	@ap: Port to unregister
 *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_remove_host(sh);

	/* always stop the port; the SCSI host itself is put by the caller */
	ap->ops->port_stop(ap);
}
4409
/**
 *	ata_host_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: associated SCSI mid-layer structure
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure, and its associated
 *	scsi_host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	/* SCSI mid-layer limits for this host */
	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	/* port starts disabled; probing enables it later */
	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;	/* no command active yet */
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_LIST_HEAD(&ap->eh_done_q);

	/* per-device transfer masks start wide open; probing narrows them */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->devno = i;
		dev->pio_mask = UINT_MAX;
		dev->mwdma_mask = UINT_MAX;
		dev->udma_mask = UINT_MAX;
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
4472
4473/**
4474 * ata_host_add - Attach low-level ATA driver to system
4475 * @ent: Information provided by low-level driver
4476 * @host_set: Collections of ports to which we add
4477 * @port_no: Port number associated with this host
4478 *
0cba632b
JG
4479 * Attach low-level ATA driver to system.
4480 *
1da177e4 4481 * LOCKING:
0cba632b 4482 * PCI/etc. bus probe sem.
1da177e4
LT
4483 *
4484 * RETURNS:
0cba632b 4485 * New ata_port on success, for NULL on error.
1da177e4
LT
4486 */
4487
057ace5e 4488static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
1da177e4
LT
4489 struct ata_host_set *host_set,
4490 unsigned int port_no)
4491{
4492 struct Scsi_Host *host;
4493 struct ata_port *ap;
4494 int rc;
4495
4496 DPRINTK("ENTER\n");
4497 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4498 if (!host)
4499 return NULL;
4500
30afc84c
TH
4501 host->transportt = &ata_scsi_transport_template;
4502
1da177e4
LT
4503 ap = (struct ata_port *) &host->hostdata[0];
4504
4505 ata_host_init(ap, host, host_set, ent, port_no);
4506
4507 rc = ap->ops->port_start(ap);
4508 if (rc)
4509 goto err_out;
4510
4511 return ap;
4512
4513err_out:
4514 scsi_host_put(host);
4515 return NULL;
4516}
4517
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses);
	 * the port pointer array is tacked onto the end
	 */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		/* clear any stale status/irq before enabling the handler */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: bus probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	/* tear down only the ports added so far */
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
4652
17b14451
AC
/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	/* detach from the SCSI mid-layer first, on every port */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	/* no more interrupts after this point */
	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		/* legacy-mode ports reserved the classic ISA regions */
		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
4698
1da177e4
LT
/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@host: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
	int i;

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);
	/* free the cached IDENTIFY data for each device on the port */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		kfree(ap->device[i].id);

	DPRINTK("EXIT\n");
	return 1;
}
4728
4729/**
4730 * ata_std_ports - initialize ioaddr with standard port offsets.
4731 * @ioaddr: IO address structure to be initialized
0baab86b
EF
4732 *
4733 * Utility function which initializes data_addr, error_addr,
4734 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4735 * device_addr, status_addr, and command_addr to standard offsets
4736 * relative to cmd_addr.
4737 *
4738 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 4739 */
0baab86b 4740
1da177e4
LT
4741void ata_std_ports(struct ata_ioports *ioaddr)
4742{
4743 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4744 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4745 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4746 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4747 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4748 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4749 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4750 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4751 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4752 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4753}
4754
0baab86b 4755
374b1873
JG
4756#ifdef CONFIG_PCI
4757
/* host_stop variant for PCI drivers that mapped MMIO via pci_iomap(). */
void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
4764
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
4789
4790/* move to PCI subsystem */
057ace5e 4791int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
4792{
4793 unsigned long tmp = 0;
4794
4795 switch (bits->width) {
4796 case 1: {
4797 u8 tmp8 = 0;
4798 pci_read_config_byte(pdev, bits->reg, &tmp8);
4799 tmp = tmp8;
4800 break;
4801 }
4802 case 2: {
4803 u16 tmp16 = 0;
4804 pci_read_config_word(pdev, bits->reg, &tmp16);
4805 tmp = tmp16;
4806 break;
4807 }
4808 case 4: {
4809 u32 tmp32 = 0;
4810 pci_read_config_dword(pdev, bits->reg, &tmp32);
4811 tmp = tmp32;
4812 break;
4813 }
4814
4815 default:
4816 return -EINVAL;
4817 }
4818
4819 tmp &= bits->mask;
4820
4821 return (tmp == bits->val) ? 1 : 0;
4822}
9b847548
JA
4823
/* PCI power-management suspend: save config space, disable the device,
 * then drop it to D3hot.  Ordering matters: state must be saved while
 * the device is still accessible.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
4831
/* PCI power-management resume: mirror of ata_pci_device_suspend() —
 * power up first, then restore config space and re-enable bus mastering.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
1da177e4
LT
4840#endif /* CONFIG_PCI */
4841
4842
1da177e4
LT
/* Module init: create the workqueue used for deferred port tasks. */
static int __init ata_init(void)
{
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
4852
/* Module exit: tear down the libata workqueue created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}
4857
4858module_init(ata_init);
4859module_exit(ata_exit);
4860
67846b30
JG
4861static unsigned long ratelimit_time;
4862static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4863
4864int ata_ratelimit(void)
4865{
4866 int rc;
4867 unsigned long flags;
4868
4869 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4870
4871 if (time_after(jiffies, ratelimit_time)) {
4872 rc = 1;
4873 ratelimit_time = jiffies + (HZ/5);
4874 } else
4875 rc = 0;
4876
4877 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4878
4879 return rc;
4880}
4881
1da177e4
LT
4882/*
4883 * libata is essentially a library of internal helper functions for
4884 * low-level ATA host controller drivers. As such, the API/ABI is
4885 * likely to change as new drivers are added and updated.
4886 * Do not depend on ABI/API stability.
4887 */
4888
4889EXPORT_SYMBOL_GPL(ata_std_bios_param);
4890EXPORT_SYMBOL_GPL(ata_std_ports);
4891EXPORT_SYMBOL_GPL(ata_device_add);
17b14451 4892EXPORT_SYMBOL_GPL(ata_host_set_remove);
1da177e4
LT
4893EXPORT_SYMBOL_GPL(ata_sg_init);
4894EXPORT_SYMBOL_GPL(ata_sg_init_one);
76014427 4895EXPORT_SYMBOL_GPL(__ata_qc_complete);
1da177e4
LT
4896EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4897EXPORT_SYMBOL_GPL(ata_eng_timeout);
4898EXPORT_SYMBOL_GPL(ata_tf_load);
4899EXPORT_SYMBOL_GPL(ata_tf_read);
4900EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4901EXPORT_SYMBOL_GPL(ata_std_dev_select);
4902EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4903EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4904EXPORT_SYMBOL_GPL(ata_check_status);
4905EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
4906EXPORT_SYMBOL_GPL(ata_exec_command);
4907EXPORT_SYMBOL_GPL(ata_port_start);
4908EXPORT_SYMBOL_GPL(ata_port_stop);
aa8f0dc6 4909EXPORT_SYMBOL_GPL(ata_host_stop);
1da177e4
LT
4910EXPORT_SYMBOL_GPL(ata_interrupt);
4911EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 4912EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
4913EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4914EXPORT_SYMBOL_GPL(ata_bmdma_start);
4915EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4916EXPORT_SYMBOL_GPL(ata_bmdma_status);
4917EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4918EXPORT_SYMBOL_GPL(ata_port_probe);
4919EXPORT_SYMBOL_GPL(sata_phy_reset);
4920EXPORT_SYMBOL_GPL(__sata_phy_reset);
4921EXPORT_SYMBOL_GPL(ata_bus_reset);
8a19ac89 4922EXPORT_SYMBOL_GPL(ata_std_probeinit);
c2bd5804
TH
4923EXPORT_SYMBOL_GPL(ata_std_softreset);
4924EXPORT_SYMBOL_GPL(sata_std_hardreset);
4925EXPORT_SYMBOL_GPL(ata_std_postreset);
4926EXPORT_SYMBOL_GPL(ata_std_probe_reset);
a62c0fc5 4927EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
623a3128 4928EXPORT_SYMBOL_GPL(ata_dev_revalidate);
1da177e4 4929EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 4930EXPORT_SYMBOL_GPL(ata_ratelimit);
6f8b9958 4931EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 4932EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
4933EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4934EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4935EXPORT_SYMBOL_GPL(ata_scsi_error);
4936EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4937EXPORT_SYMBOL_GPL(ata_scsi_release);
4938EXPORT_SYMBOL_GPL(ata_host_intr);
4939EXPORT_SYMBOL_GPL(ata_dev_classify);
6a62a04d
TH
4940EXPORT_SYMBOL_GPL(ata_id_string);
4941EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4 4942EXPORT_SYMBOL_GPL(ata_scsi_simulate);
a72ec4ce
TH
4943EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4944EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
1da177e4 4945
1bc4ccff 4946EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
4947EXPORT_SYMBOL_GPL(ata_timing_compute);
4948EXPORT_SYMBOL_GPL(ata_timing_merge);
4949
1da177e4
LT
4950#ifdef CONFIG_PCI
4951EXPORT_SYMBOL_GPL(pci_test_config_bits);
374b1873 4952EXPORT_SYMBOL_GPL(ata_pci_host_stop);
1da177e4
LT
4953EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4954EXPORT_SYMBOL_GPL(ata_pci_init_one);
4955EXPORT_SYMBOL_GPL(ata_pci_remove_one);
9b847548
JA
4956EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4957EXPORT_SYMBOL_GPL(ata_pci_device_resume);
67951ade
AC
4958EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4959EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 4960#endif /* CONFIG_PCI */
9b847548
JA
4961
4962EXPORT_SYMBOL_GPL(ata_device_suspend);
4963EXPORT_SYMBOL_GPL(ata_device_resume);
4964EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4965EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
This page took 1.636356 seconds and 5 git commands to generate.