libata-pmp: extend ACPI support to cover PMP
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
a8601e5f
AM
100static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101module_param(ata_probe_timeout, int, 0444);
102MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
103
d7d0dad6
JG
104int libata_noacpi = 1;
105module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
106MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
107
1da177e4
LT
108MODULE_AUTHOR("Jeff Garzik");
109MODULE_DESCRIPTION("Library module for ATA devices");
110MODULE_LICENSE("GPL");
111MODULE_VERSION(DRV_VERSION);
112
0baab86b 113
1da177e4
LT
114/**
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
1da177e4 117 * @pmp: Port multiplier port
9977126c
TH
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will output
1da177e4
LT
120 *
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
123 *
124 * LOCKING:
125 * Inherited from caller.
126 */
9977126c 127void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 128{
9977126c
TH
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number*/
131 if (is_cmd)
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
133
1da177e4
LT
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
136
137 fis[4] = tf->lbal;
138 fis[5] = tf->lbam;
139 fis[6] = tf->lbah;
140 fis[7] = tf->device;
141
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
146
147 fis[12] = tf->nsect;
148 fis[13] = tf->hob_nsect;
149 fis[14] = 0;
150 fis[15] = tf->ctl;
151
152 fis[16] = 0;
153 fis[17] = 0;
154 fis[18] = 0;
155 fis[19] = 0;
156}
157
158/**
159 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
160 * @fis: Buffer from which data will be input
161 * @tf: Taskfile to output
162 *
e12a1be6 163 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
164 *
165 * LOCKING:
166 * Inherited from caller.
167 */
168
057ace5e 169void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
170{
171 tf->command = fis[2]; /* status */
172 tf->feature = fis[3]; /* error */
173
174 tf->lbal = fis[4];
175 tf->lbam = fis[5];
176 tf->lbah = fis[6];
177 tf->device = fis[7];
178
179 tf->hob_lbal = fis[8];
180 tf->hob_lbam = fis[9];
181 tf->hob_lbah = fis[10];
182
183 tf->nsect = fis[12];
184 tf->hob_nsect = fis[13];
185}
186
8cbd6df1
AL
/* Command lookup table for ata_rwcmd_protocol().  Indexed by
 * base + fua + lba48 + write, where base is 0 (PIO multi-sector),
 * 8 (PIO single-sector) or 16 (DMA); fua adds 4, lba48 adds 2 and
 * write adds 1.  A zero entry marks an invalid combination (for
 * example FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
216
217/**
8cbd6df1 218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
1da177e4 221 *
2e9edbf8 222 * Examine the device configuration and tf->flags to calculate
8cbd6df1 223 * the proper read/write commands and protocol to use.
1da177e4
LT
224 *
225 * LOCKING:
226 * caller.
227 */
bd056d7e 228static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 229{
9a3dccc4 230 u8 cmd;
1da177e4 231
9a3dccc4 232 int index, fua, lba48, write;
2e9edbf8 233
9a3dccc4 234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 237
8cbd6df1
AL
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
9a3dccc4 240 index = dev->multi_count ? 0 : 8;
9af5c9c9 241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
0565c26d 244 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
245 } else {
246 tf->protocol = ATA_PROT_DMA;
9a3dccc4 247 index = 16;
8cbd6df1 248 }
1da177e4 249
9a3dccc4
TH
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
251 if (cmd) {
252 tf->command = cmd;
253 return 0;
254 }
255 return -1;
1da177e4
LT
256}
257
35b649fe
TH
258/**
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
262 *
263 * LOCKING:
264 * None.
265 *
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
269 *
270 * RETURNS:
271 * Block address read from @tf.
272 */
273u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274{
275 u64 block = 0;
276
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
282 } else
283 block |= (tf->device & 0xf) << 24;
284
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
287 block |= tf->lbal;
288 } else {
289 u32 cyl, head, sect;
290
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
293 sect = tf->lbal;
294
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
296 }
297
298 return block;
299}
300
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for FPDMA the tag rides in sector count bits 7:3 and
		 * the block count moves to the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
/* Maps each xfer_mask bit range (PIO/MWDMA/UDMA) to its base XFER_*
 * mode value; terminated by a negative shift.  Used by the
 * ata_xfer_mask2mode/mode2mask/mode2shift converters below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* XFER_* value of the range's lowest mode */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* index order must match the PIO/MWDMA/UDMA bit layout of
	 * xfer_mask as defined by ata_xfer_tbl */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	/* fls() is 1-based; highbit is -1 when no bit is set */
	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
594
4c360c81
TH
/* Return a human-readable name for SATA link speed @spd (1-based
 * generation number); "<unknown>" for zero or out-of-range values. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int index = spd - 1;	/* wraps for spd == 0, caught below */

	if (spd == 0 || index >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[index];
}
606
/* Disable @dev: drop it to the lowest transfer mode and mark its class
 * as unsupported so it is skipped from further processing. */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		/* force PIO0 quietly -- the device is going away anyway */
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		/* NOTE(review): incrementing class appears to turn an
		 * ATA_DEV_* value into its *_UNSUP counterpart -- confirm
		 * against the ATA_DEV_* enum ordering in libata headers */
		dev->class++;
	}
}
617
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns to the sector count / LBA low
	 * shadow registers; only a real device will latch them */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* read back the last pattern written */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
660
1da177e4
LT
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk("ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
719
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	/* after reset the feature register holds the diagnostic code */
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
788
789/**
6a62a04d 790 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
791 * @id: IDENTIFY DEVICE results we will examine
792 * @s: string into which data is output
793 * @ofs: offset into identify device page
794 * @len: length of string to return. must be an even number.
795 *
796 * The strings in the IDENTIFY DEVICE page are broken up into
797 * 16-bit chunks. Run through the string, and output each
798 * 8-bit chunk linearly, regardless of platform.
799 *
800 * LOCKING:
801 * caller.
802 */
803
6a62a04d
TH
804void ata_id_string(const u16 *id, unsigned char *s,
805 unsigned int ofs, unsigned int len)
1da177e4
LT
806{
807 unsigned int c;
808
809 while (len > 0) {
810 c = id[ofs] >> 8;
811 *s = c;
812 s++;
813
814 c = id[ofs] & 0xff;
815 *s = c;
816 s++;
817
818 ofs++;
819 len -= 2;
820 }
821}
822
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* @len must be odd: even payload plus the terminating NUL */
	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces (IDENTIFY strings are space-padded) */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
0baab86b 851
db6f8759
TH
852static u64 ata_id_n_sectors(const u16 *id)
853{
854 if (ata_id_has_lba(id)) {
855 if (ata_id_has_lba48(id))
856 return ata_id_u64(id, 100);
857 else
858 return ata_id_u32(id, 60);
859 } else {
860 if (ata_id_current_chs_valid(id))
861 return ata_id_u32(id, 57);
862 else
863 return id[1] * id[3] * id[6];
864 }
865}
866
1e999736
AC
867static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
868{
869 u64 sectors = 0;
870
871 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
872 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
873 sectors |= (tf->hob_lbal & 0xff) << 24;
874 sectors |= (tf->lbah & 0xff) << 16;
875 sectors |= (tf->lbam & 0xff) << 8;
876 sectors |= (tf->lbal & 0xff);
877
878 return ++sectors;
879}
880
881static u64 ata_tf_to_lba(struct ata_taskfile *tf)
882{
883 u64 sectors = 0;
884
885 sectors |= (tf->device & 0x0f) << 24;
886 sectors |= (tf->lbah & 0xff) << 16;
887 sectors |= (tf->lbam & 0xff) << 8;
888 sectors |= (tf->lbal & 0xff);
889
890 return ++sectors;
891}
892
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ATA_ABORTED means the device rejected the command */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result comes back in the address registers of @tf */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	/* some devices report max address instead of size; compensate */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
942
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable sector, i.e. size - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 go into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
999
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1095
10305f0f
A
1096/**
1097 * ata_id_to_dma_mode - Identify DMA mode from id block
1098 * @dev: device to identify
cc261267 1099 * @unknown: mode to assume if we cannot tell
10305f0f
A
1100 *
1101 * Set up the timing values for the device based upon the identify
1102 * reported values for the DMA mode. This function is used by drivers
1103 * which rely upon firmware configured modes, but wish to report the
1104 * mode correctly when possible.
1105 *
1106 * In addition we emit similarly formatted messages to the default
1107 * ata_dev_set_mode handler, in order to provide consistency of
1108 * presentation.
1109 */
1110
1111void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1112{
1113 unsigned int mask;
1114 u8 mode;
1115
1116 /* Pack the DMA modes */
1117 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1118 if (dev->id[53] & 0x04)
1119 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1120
1121 /* Select the mode in use */
1122 mode = ata_xfer_mask2mode(mask);
1123
1124 if (mode != 0) {
1125 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1126 ata_mode_string(mask));
1127 } else {
1128 /* SWDMA perhaps ? */
1129 mode = unknown;
1130 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1131 }
1132
1133 /* Configure the device reporting */
1134 dev->xfer_mode = mode;
1135 dev->xfer_shift = ata_xfer_mode2shift(mode);
1136}
1137
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: some controllers need no device selection */
}
1153
0baab86b 1154
1da177e4
LT
1155/**
1156 * ata_std_dev_select - Select device 0/1 on ATA bus
1157 * @ap: ATA channel to manipulate
1158 * @device: ATA device (numbered from zero) to select
1159 *
1160 * Use the method defined in the ATA specification to
1161 * make either device 0, or device 1, active on the
0baab86b
EF
1162 * ATA channel. Works with both PIO and MMIO.
1163 *
1164 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1165 *
1166 * LOCKING:
1167 * caller.
1168 */
1169
1170void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1171{
1172 u8 tmp;
1173
1174 if (device == 0)
1175 tmp = ATA_DEVICE_OBS;
1176 else
1177 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1178
0d5ff566 1179 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1180 ata_pause(ap); /* needed; also flushes, for mmio */
1181}
1182
1183/**
1184 * ata_dev_select - Select device 0/1 on ATA bus
1185 * @ap: ATA channel to manipulate
1186 * @device: ATA device (numbered from zero) to select
1187 * @wait: non-zero to wait for Status register BSY bit to clear
1188 * @can_sleep: non-zero if context allows sleeping
1189 *
1190 * Use the method defined in the ATA specification to
1191 * make either device 0, or device 1, active on the
1192 * ATA channel.
1193 *
1194 * This is a high-level version of ata_std_dev_select(),
1195 * which additionally provides the services of inserting
1196 * the proper pauses and status polling, where needed.
1197 *
1198 * LOCKING:
1199 * caller.
1200 */
1201
1202void ata_dev_select(struct ata_port *ap, unsigned int device,
1203 unsigned int wait, unsigned int can_sleep)
1204{
88574551 1205 if (ata_msg_probe(ap))
44877b4e
TH
1206 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1207 "device %u, wait %u\n", device, wait);
1da177e4
LT
1208
1209 if (wait)
1210 ata_wait_idle(ap);
1211
1212 ap->ops->dev_select(ap, device);
1213
1214 if (wait) {
9af5c9c9 1215 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1216 msleep(150);
1217 ata_wait_idle(ap);
1218 }
1219}
1220
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* capability / validity / PIO / MWDMA / queueing words */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* version / command-set support words */
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* UDMA modes and hardware reset result */
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1259
cb95d562
TH
1260/**
1261 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1262 * @id: IDENTIFY data to compute xfer mask from
1263 *
1264 * Compute the xfermask for this device. This is not as trivial
1265 * as it seems if we must consider early devices correctly.
1266 *
1267 * FIXME: pre IDE drive timing (do we care ?).
1268 *
1269 * LOCKING:
1270 * None.
1271 *
1272 * RETURNS:
1273 * Computed xfermask
1274 */
1275static unsigned int ata_id_xfermask(const u16 *id)
1276{
1277 unsigned int pio_mask, mwdma_mask, udma_mask;
1278
1279 /* Usual case. Word 53 indicates word 64 is valid */
1280 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1281 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1282 pio_mask <<= 3;
1283 pio_mask |= 0x7;
1284 } else {
1285 /* If word 64 isn't valid then Word 51 high byte holds
1286 * the PIO timing number for the maximum. Turn it into
1287 * a mask.
1288 */
7a0f1c8a 1289 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1290 if (mode < 5) /* Valid PIO range */
1291 pio_mask = (2 << mode) - 1;
1292 else
1293 pio_mask = 1;
cb95d562
TH
1294
1295 /* But wait.. there's more. Design your standards by
1296 * committee and you too can get a free iordy field to
1297 * process. However its the speeds not the modes that
1298 * are supported... Note drivers using the timing API
1299 * will get this right anyway
1300 */
1301 }
1302
1303 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1304
b352e57d
AC
1305 if (ata_id_is_cfa(id)) {
1306 /*
1307 * Process compact flash extended modes
1308 */
1309 int pio = id[163] & 0x7;
1310 int dma = (id[163] >> 3) & 7;
1311
1312 if (pio)
1313 pio_mask |= (1 << 5);
1314 if (pio > 1)
1315 pio_mask |= (1 << 6);
1316 if (dma)
1317 mwdma_mask |= (1 << 3);
1318 if (dma > 1)
1319 mwdma_mask |= (1 << 4);
1320 }
1321
fb21f0d0
TH
1322 udma_mask = 0;
1323 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1324 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1325
1326 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1327}
1328
86e45b6b
TH
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* re-point the per-port delayed work at @fn and stash its argument */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1357
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancel pending work and wait for a running instance to finish */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1377
/* Completion callback for internal commands: wakes the thread that
 * ata_exec_internal_sg() parked on the completion stored in
 * qc->private_data.
 */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
1384
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the in-flight command state so the internal
	 * command runs on a quiescent port; restored at "finish up".
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total transfer length is the sum of all sg entries */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1561
2432697b 1562/**
33480a0e 1563 * ata_exec_internal - execute libata internal command
2432697b
TH
1564 * @dev: Device to which the command is sent
1565 * @tf: Taskfile registers for the command and the result
1566 * @cdb: CDB for packet command
1567 * @dma_dir: Data tranfer direction of the command
1568 * @buf: Data buffer of the command
1569 * @buflen: Length of data buffer
1570 *
1571 * Wrapper around ata_exec_internal_sg() which takes simple
1572 * buffer instead of sg list.
1573 *
1574 * LOCKING:
1575 * None. Should be called with kernel context, might sleep.
1576 *
1577 * RETURNS:
1578 * Zero on success, AC_ERR_* mask on failure
1579 */
1580unsigned ata_exec_internal(struct ata_device *dev,
1581 struct ata_taskfile *tf, const u8 *cdb,
1582 int dma_dir, void *buf, unsigned int buflen)
1583{
33480a0e
TH
1584 struct scatterlist *psg = NULL, sg;
1585 unsigned int n_elem = 0;
2432697b 1586
33480a0e
TH
1587 if (dma_dir != DMA_NONE) {
1588 WARN_ON(!buf);
1589 sg_init_one(&sg, buf, buflen);
1590 psg = &sg;
1591 n_elem++;
1592 }
2432697b 1593
33480a0e 1594 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1595}
1596
977e6b9f
TH
1597/**
1598 * ata_do_simple_cmd - execute simple internal command
1599 * @dev: Device to which the command is sent
1600 * @cmd: Opcode to execute
1601 *
1602 * Execute a 'simple' command, that only consists of the opcode
1603 * 'cmd' itself, without filling any other registers
1604 *
1605 * LOCKING:
1606 * Kernel thread context (may sleep).
1607 *
1608 * RETURNS:
1609 * Zero on success, AC_ERR_* mask on failure
e58eb583 1610 */
77b08fb5 1611unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1612{
1613 struct ata_taskfile tf;
e58eb583
TH
1614
1615 ata_tf_init(dev, &tf);
1616
1617 tf.command = cmd;
1618 tf.flags |= ATA_TFLAG_DEVICE;
1619 tf.protocol = ATA_PROT_NODATA;
1620
977e6b9f 1621 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1622}
1623
1bc4ccff
AC
1624/**
1625 * ata_pio_need_iordy - check if iordy needed
1626 * @adev: ATA device
1627 *
1628 * Check if the current speed of the device requires IORDY. Used
1629 * by various controllers for chip configuration.
1630 */
a617c09f 1631
1bc4ccff
AC
1632unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1633{
432729f0
AC
1634 /* Controller doesn't support IORDY. Probably a pointless check
1635 as the caller should know this */
9af5c9c9 1636 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1637 return 0;
432729f0
AC
1638 /* PIO3 and higher it is mandatory */
1639 if (adev->pio_mode > XFER_PIO_2)
1640 return 1;
1641 /* We turn it on when possible */
1642 if (ata_id_has_iordy(adev->id))
1bc4ccff 1643 return 1;
432729f0
AC
1644 return 0;
1645}
2e9edbf8 1646
432729f0
AC
1647/**
1648 * ata_pio_mask_no_iordy - Return the non IORDY mask
1649 * @adev: ATA device
1650 *
1651 * Compute the highest mode possible if we are not using iordy. Return
1652 * -1 if no iordy mode is available.
1653 */
a617c09f 1654
432729f0
AC
1655static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1656{
1bc4ccff 1657 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1658 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1659 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1660 /* Is the speed faster than the drive allows non IORDY ? */
1661 if (pio) {
1662 /* This is cycle times not frequency - watch the logic! */
1663 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1664 return 3 << ATA_SHIFT_PIO;
1665 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1666 }
1667 }
432729f0 1668 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1669}
1670
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;	/* human-readable failure cause for err_out */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the returned page must match the class we asked */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* NOTE(review): 0x37c8/0x738c in word 2 appear to be the
	 * incomplete/complete IDENTIFY signatures of drives that power
	 * up in standby -- confirm against the SET FEATURES spin-up
	 * description in the ATA spec.
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1847
3373efd8 1848static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1849{
9af5c9c9
TH
1850 struct ata_port *ap = dev->link->ap;
1851 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1852}
1853
a6e6ce8e
TH
1854static void ata_dev_config_ncq(struct ata_device *dev,
1855 char *desc, size_t desc_sz)
1856{
9af5c9c9 1857 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1858 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1859
1860 if (!ata_id_has_ncq(dev->id)) {
1861 desc[0] = '\0';
1862 return;
1863 }
75683fe7 1864 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1865 snprintf(desc, desc_sz, "NCQ (not used)");
1866 return;
1867 }
a6e6ce8e 1868 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1869 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1870 dev->flags |= ATA_DFLAG_NCQ;
1871 }
1872
1873 if (hdepth >= ddepth)
1874 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1875 else
1876 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1877}
1878
49016aca 1879/**
ffeae418 1880 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1881 * @dev: Target device to configure
1882 *
1883 * Configure @dev according to @dev->id. Generic and low-level
1884 * driver specific fixups are also applied.
49016aca
TH
1885 *
1886 * LOCKING:
ffeae418
TH
1887 * Kernel thread context (may sleep)
1888 *
1889 * RETURNS:
1890 * 0 on success, -errno otherwise
49016aca 1891 */
efdaedc4 1892int ata_dev_configure(struct ata_device *dev)
49016aca 1893{
9af5c9c9
TH
1894 struct ata_port *ap = dev->link->ap;
1895 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1896 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1897 const u16 *id = dev->id;
ff8854b2 1898 unsigned int xfer_mask;
b352e57d 1899 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1900 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1901 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1902 int rc;
49016aca 1903
0dd4b21f 1904 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1905 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1906 __FUNCTION__);
ffeae418 1907 return 0;
49016aca
TH
1908 }
1909
0dd4b21f 1910 if (ata_msg_probe(ap))
44877b4e 1911 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1912
75683fe7
TH
1913 /* set horkage */
1914 dev->horkage |= ata_dev_blacklisted(dev);
1915
6746544c
TH
1916 /* let ACPI work its magic */
1917 rc = ata_acpi_on_devcfg(dev);
1918 if (rc)
1919 return rc;
08573a86 1920
05027adc
TH
1921 /* massage HPA, do it early as it might change IDENTIFY data */
1922 rc = ata_hpa_resize(dev);
1923 if (rc)
1924 return rc;
1925
c39f5ebe 1926 /* print device capabilities */
0dd4b21f 1927 if (ata_msg_probe(ap))
88574551
TH
1928 ata_dev_printk(dev, KERN_DEBUG,
1929 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1930 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1931 __FUNCTION__,
f15a1daf
TH
1932 id[49], id[82], id[83], id[84],
1933 id[85], id[86], id[87], id[88]);
c39f5ebe 1934
208a9933 1935 /* initialize to-be-configured parameters */
ea1dd4e1 1936 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1937 dev->max_sectors = 0;
1938 dev->cdb_len = 0;
1939 dev->n_sectors = 0;
1940 dev->cylinders = 0;
1941 dev->heads = 0;
1942 dev->sectors = 0;
1943
1da177e4
LT
1944 /*
1945 * common ATA, ATAPI feature tests
1946 */
1947
ff8854b2 1948 /* find max transfer mode; for printk only */
1148c3a7 1949 xfer_mask = ata_id_xfermask(id);
1da177e4 1950
0dd4b21f
BP
1951 if (ata_msg_probe(ap))
1952 ata_dump_id(id);
1da177e4 1953
ef143d57
AL
1954 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1955 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1956 sizeof(fwrevbuf));
1957
1958 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1959 sizeof(modelbuf));
1960
1da177e4
LT
1961 /* ATA-specific feature tests */
1962 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1963 if (ata_id_is_cfa(id)) {
1964 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1965 ata_dev_printk(dev, KERN_WARNING,
1966 "supports DRM functions and may "
1967 "not be fully accessable.\n");
b352e57d
AC
1968 snprintf(revbuf, 7, "CFA");
1969 }
1970 else
1971 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1972
1148c3a7 1973 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1974
3f64f565
EM
1975 if (dev->id[59] & 0x100)
1976 dev->multi_count = dev->id[59] & 0xff;
1977
1148c3a7 1978 if (ata_id_has_lba(id)) {
4c2d721a 1979 const char *lba_desc;
a6e6ce8e 1980 char ncq_desc[20];
8bf62ece 1981
4c2d721a
TH
1982 lba_desc = "LBA";
1983 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1984 if (ata_id_has_lba48(id)) {
8bf62ece 1985 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1986 lba_desc = "LBA48";
6fc49adb
TH
1987
1988 if (dev->n_sectors >= (1UL << 28) &&
1989 ata_id_has_flush_ext(id))
1990 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1991 }
8bf62ece 1992
a6e6ce8e
TH
1993 /* config NCQ */
1994 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1995
8bf62ece 1996 /* print device info to dmesg */
3f64f565
EM
1997 if (ata_msg_drv(ap) && print_info) {
1998 ata_dev_printk(dev, KERN_INFO,
1999 "%s: %s, %s, max %s\n",
2000 revbuf, modelbuf, fwrevbuf,
2001 ata_mode_string(xfer_mask));
2002 ata_dev_printk(dev, KERN_INFO,
2003 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2004 (unsigned long long)dev->n_sectors,
3f64f565
EM
2005 dev->multi_count, lba_desc, ncq_desc);
2006 }
ffeae418 2007 } else {
8bf62ece
AL
2008 /* CHS */
2009
2010 /* Default translation */
1148c3a7
TH
2011 dev->cylinders = id[1];
2012 dev->heads = id[3];
2013 dev->sectors = id[6];
8bf62ece 2014
1148c3a7 2015 if (ata_id_current_chs_valid(id)) {
8bf62ece 2016 /* Current CHS translation is valid. */
1148c3a7
TH
2017 dev->cylinders = id[54];
2018 dev->heads = id[55];
2019 dev->sectors = id[56];
8bf62ece
AL
2020 }
2021
2022 /* print device info to dmesg */
3f64f565 2023 if (ata_msg_drv(ap) && print_info) {
88574551 2024 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2025 "%s: %s, %s, max %s\n",
2026 revbuf, modelbuf, fwrevbuf,
2027 ata_mode_string(xfer_mask));
a84471fe 2028 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2029 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2030 (unsigned long long)dev->n_sectors,
2031 dev->multi_count, dev->cylinders,
2032 dev->heads, dev->sectors);
2033 }
07f6f7d0
AL
2034 }
2035
6e7846e9 2036 dev->cdb_len = 16;
1da177e4
LT
2037 }
2038
2039 /* ATAPI-specific feature tests */
2c13b7ce 2040 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2041 const char *cdb_intr_string = "";
2042 const char *atapi_an_string = "";
7d77b247 2043 u32 sntf;
08a556db 2044
1148c3a7 2045 rc = atapi_cdb_len(id);
1da177e4 2046 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2047 if (ata_msg_warn(ap))
88574551
TH
2048 ata_dev_printk(dev, KERN_WARNING,
2049 "unsupported CDB len\n");
ffeae418 2050 rc = -EINVAL;
1da177e4
LT
2051 goto err_out_nosup;
2052 }
6e7846e9 2053 dev->cdb_len = (unsigned int) rc;
1da177e4 2054
7d77b247
TH
2055 /* Enable ATAPI AN if both the host and device have
2056 * the support. If PMP is attached, SNTF is required
2057 * to enable ATAPI AN to discern between PHY status
2058 * changed notifications and ATAPI ANs.
9f45cbd3 2059 */
7d77b247
TH
2060 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2061 (!ap->nr_pmp_links ||
2062 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2063 unsigned int err_mask;
2064
9f45cbd3 2065 /* issue SET feature command to turn this on */
854c73a2
TH
2066 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2067 if (err_mask)
9f45cbd3 2068 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2069 "failed to enable ATAPI AN "
2070 "(err_mask=0x%x)\n", err_mask);
2071 else {
9f45cbd3 2072 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2073 atapi_an_string = ", ATAPI AN";
2074 }
9f45cbd3
KCA
2075 }
2076
08a556db 2077 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2078 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2079 cdb_intr_string = ", CDB intr";
2080 }
312f7da2 2081
1da177e4 2082 /* print device info to dmesg */
5afc8142 2083 if (ata_msg_drv(ap) && print_info)
ef143d57 2084 ata_dev_printk(dev, KERN_INFO,
854c73a2 2085 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2086 modelbuf, fwrevbuf,
12436c30 2087 ata_mode_string(xfer_mask),
854c73a2 2088 cdb_intr_string, atapi_an_string);
1da177e4
LT
2089 }
2090
914ed354
TH
2091 /* determine max_sectors */
2092 dev->max_sectors = ATA_MAX_SECTORS;
2093 if (dev->flags & ATA_DFLAG_LBA48)
2094 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2095
93590859
AC
2096 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2097 /* Let the user know. We don't want to disallow opens for
2098 rescue purposes, or in case the vendor is just a blithering
2099 idiot */
2100 if (print_info) {
2101 ata_dev_printk(dev, KERN_WARNING,
2102"Drive reports diagnostics failure. This may indicate a drive\n");
2103 ata_dev_printk(dev, KERN_WARNING,
2104"fault or invalid emulation. Contact drive vendor for information.\n");
2105 }
2106 }
2107
4b2f3ede 2108 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2109 if (ata_dev_knobble(dev)) {
5afc8142 2110 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2111 ata_dev_printk(dev, KERN_INFO,
2112 "applying bridge limits\n");
5a529139 2113 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2114 dev->max_sectors = ATA_MAX_SECTORS;
2115 }
2116
75683fe7 2117 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2118 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2119 dev->max_sectors);
18d6e9d5 2120
4b2f3ede 2121 if (ap->ops->dev_config)
cd0d3bbc 2122 ap->ops->dev_config(dev);
4b2f3ede 2123
0dd4b21f
BP
2124 if (ata_msg_probe(ap))
2125 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2126 __FUNCTION__, ata_chk_status(ap));
ffeae418 2127 return 0;
1da177e4
LT
2128
2129err_out_nosup:
0dd4b21f 2130 if (ata_msg_probe(ap))
88574551
TH
2131 ata_dev_printk(dev, KERN_DEBUG,
2132 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2133 return rc;
1da177e4
LT
2134}
2135
be0d18df 2136/**
2e41e8e6 2137 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2138 * @ap: port
2139 *
2e41e8e6 2140 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2141 * detection.
2142 */
2143
int ata_cable_40wire(struct ata_port *ap)
{
	/* Unconditionally report a 40-wire PATA cable; @ap is unused. */
	return ATA_CBL_PATA40;
}
2148
2149/**
2e41e8e6 2150 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2151 * @ap: port
2152 *
2e41e8e6 2153 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2154 * detection.
2155 */
2156
int ata_cable_80wire(struct ata_port *ap)
{
	/* Unconditionally report an 80-wire PATA cable; @ap is unused. */
	return ATA_CBL_PATA80;
}
2161
2162/**
2163 * ata_cable_unknown - return unknown PATA cable.
2164 * @ap: port
2165 *
2166 * Helper method for drivers which have no PATA cable detection.
2167 */
2168
int ata_cable_unknown(struct ata_port *ap)
{
	/* Used by drivers that cannot sense the cable type; @ap is unused. */
	return ATA_CBL_PATA_UNK;
}
2173
2174/**
2175 * ata_cable_sata - return SATA cable type
2176 * @ap: port
2177 *
2178 * Helper method for drivers which have SATA cables
2179 */
2180
int ata_cable_sata(struct ata_port *ap)
{
	/* SATA has no PATA cable; always report a SATA "cable". @ap is unused. */
	return ATA_CBL_SATA;
}
2185
1da177e4
LT
2186/**
2187 * ata_bus_probe - Reset and probe ATA bus
2188 * @ap: Bus to probe
2189 *
0cba632b
JG
2190 * Master ATA bus probing function. Initiates a hardware-dependent
2191 * bus reset, then attempts to identify any devices found on
2192 * the bus.
2193 *
1da177e4 2194 * LOCKING:
0cba632b 2195 * PCI/etc. bus probe sem.
1da177e4
LT
2196 *
2197 * RETURNS:
96072e69 2198 * Zero on success, negative errno otherwise.
1da177e4
LT
2199 */
2200
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	/* mark the port enabled before we start poking at it */
	ata_port_probe(ap);

	/* each device gets ATA_PROBE_MAX_TRIES attempts before being disabled */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* Snapshot the classes phy_reset() determined, then reset each
	 * dev->class to UNKNOWN; it is restored below only for devices
	 * that still have tries left.
	 */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset() may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* PRINTINFO makes ata_dev_configure() log the device summary */
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device came up */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose step failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough - -ENODEV shares the last-chance slowdown below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2321
2322/**
0cba632b
JG
2323 * ata_port_probe - Mark port as enabled
2324 * @ap: Port for which we indicate enablement
1da177e4 2325 *
0cba632b
JG
2326 * Modify @ap data structure such that the system
2327 * thinks that the entire port is enabled.
2328 *
cca3974e 2329 * LOCKING: host lock, or some other form of
0cba632b 2330 * serialization.
1da177e4
LT
2331 */
2332
void ata_port_probe(struct ata_port *ap)
{
	/* Clearing DISABLED marks the whole port as usable again. */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2337
3be680b7
TH
2338/**
2339 * sata_print_link_status - Print SATA link status
936fd732 2340 * @link: SATA link to printk link status about
3be680b7
TH
2341 *
2342 * This function prints link speed and status of a SATA link.
2343 *
2344 * LOCKING:
2345 * None.
2346 */
936fd732 2347void sata_print_link_status(struct ata_link *link)
3be680b7 2348{
6d5f9732 2349 u32 sstatus, scontrol, tmp;
3be680b7 2350
936fd732 2351 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2352 return;
936fd732 2353 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2354
936fd732 2355 if (ata_link_online(link)) {
3be680b7 2356 tmp = (sstatus >> 4) & 0xf;
936fd732 2357 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2358 "SATA link up %s (SStatus %X SControl %X)\n",
2359 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2360 } else {
936fd732 2361 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2362 "SATA link down (SStatus %X SControl %X)\n",
2363 sstatus, scontrol);
3be680b7
TH
2364 }
2365}
2366
1da177e4 2367/**
780a87f7
JG
2368 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2369 * @ap: SATA port associated with target SATA PHY.
1da177e4 2370 *
780a87f7
JG
2371 * This function issues commands to standard SATA Sxxx
2372 * PHY registers, to wake up the phy (and device), and
2373 * clear any reset condition.
1da177e4
LT
2374 *
2375 * LOCKING:
0cba632b 2376 * PCI/etc. bus probe sem.
1da177e4
LT
2377 *
2378 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);	/* overall 5s budget */
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * "device presence detected but PHY not established" */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device to drop BSY; give up on the port if it won't */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2422
2423/**
780a87f7
JG
2424 * sata_phy_reset - Reset SATA bus.
2425 * @ap: SATA port associated with target SATA PHY.
1da177e4 2426 *
780a87f7
JG
2427 * This function resets the SATA bus, and then probes
2428 * the bus for devices.
1da177e4
LT
2429 *
2430 * LOCKING:
0cba632b 2431 * PCI/etc. bus probe sem.
1da177e4
LT
2432 *
2433 */
void sata_phy_reset(struct ata_port *ap)
{
	/* wake/reset the PHY; may disable the port if the link stays down */
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	/* link is up - probe the bus for devices */
	ata_bus_reset(ap);
}
2441
ebdfca6e
AC
2442/**
2443 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2444 * @adev: device
2445 *
2446 * Obtain the other device on the same cable, or if none is
2447 * present NULL is returned
2448 */
2e9edbf8 2449
3373efd8 2450struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2451{
9af5c9c9
TH
2452 struct ata_link *link = adev->link;
2453 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2454 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2455 return NULL;
2456 return pair;
2457}
2458
1da177e4 2459/**
780a87f7
JG
2460 * ata_port_disable - Disable port.
2461 * @ap: Port to be disabled.
1da177e4 2462 *
780a87f7
JG
2463 * Modify @ap data structure such that the system
2464 * thinks that the entire port is disabled, and should
2465 * never attempt to probe or communicate with devices
2466 * on this port.
2467 *
cca3974e 2468 * LOCKING: host lock, or some other form of
780a87f7 2469 * serialization.
1da177e4
LT
2470 */
2471
void ata_port_disable(struct ata_port *ap)
{
	/* Mark both possible devices absent and flag the port disabled
	 * so nothing tries to talk to it any more. */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2478
1c3fae4d 2479/**
3c567b7d 2480 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2481 * @link: Link to adjust SATA spd limit for
1c3fae4d 2482 *
936fd732 2483 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2484 * function only adjusts the limit. The change must be applied
3c567b7d 2485 * using sata_set_spd().
1c3fae4d
TH
2486 *
2487 * LOCKING:
2488 * Inherited from caller.
2489 *
2490 * RETURNS:
2491 * 0 on success, negative errno on failure
2492 */
936fd732 2493int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2494{
81952c54
TH
2495 u32 sstatus, spd, mask;
2496 int rc, highbit;
1c3fae4d 2497
936fd732 2498 if (!sata_scr_valid(link))
008a7896
TH
2499 return -EOPNOTSUPP;
2500
2501 /* If SCR can be read, use it to determine the current SPD.
936fd732 2502 * If not, use cached value in link->sata_spd.
008a7896 2503 */
936fd732 2504 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2505 if (rc == 0)
2506 spd = (sstatus >> 4) & 0xf;
2507 else
936fd732 2508 spd = link->sata_spd;
1c3fae4d 2509
936fd732 2510 mask = link->sata_spd_limit;
1c3fae4d
TH
2511 if (mask <= 1)
2512 return -EINVAL;
008a7896
TH
2513
2514 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2515 highbit = fls(mask) - 1;
2516 mask &= ~(1 << highbit);
2517
008a7896
TH
2518 /* Mask off all speeds higher than or equal to the current
2519 * one. Force 1.5Gbps if current SPD is not available.
2520 */
2521 if (spd > 1)
2522 mask &= (1 << (spd - 1)) - 1;
2523 else
2524 mask &= 1;
2525
2526 /* were we already at the bottom? */
1c3fae4d
TH
2527 if (!mask)
2528 return -EINVAL;
2529
936fd732 2530 link->sata_spd_limit = mask;
1c3fae4d 2531
936fd732 2532 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2533 sata_spd_string(fls(mask)));
1c3fae4d
TH
2534
2535 return 0;
2536}
2537
936fd732 2538static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2539{
2540 u32 spd, limit;
2541
936fd732 2542 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2543 limit = 0;
2544 else
936fd732 2545 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2546
2547 spd = (*scontrol >> 4) & 0xf;
2548 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2549
2550 return spd != limit;
2551}
2552
2553/**
3c567b7d 2554 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2555 * @link: Link in question
1c3fae4d
TH
2556 *
2557 * Test whether the spd limit in SControl matches
936fd732 2558 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2559 * whether hardreset is necessary to apply SATA spd
2560 * configuration.
2561 *
2562 * LOCKING:
2563 * Inherited from caller.
2564 *
2565 * RETURNS:
2566 * 1 if SATA spd configuration is needed, 0 otherwise.
2567 */
936fd732 2568int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2569{
2570 u32 scontrol;
2571
936fd732 2572 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2573 return 0;
2574
936fd732 2575 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2576}
2577
2578/**
3c567b7d 2579 * sata_set_spd - set SATA spd according to spd limit
936fd732 2580 * @link: Link to set SATA spd for
1c3fae4d 2581 *
936fd732 2582 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2583 *
2584 * LOCKING:
2585 * Inherited from caller.
2586 *
2587 * RETURNS:
2588 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2589 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2590 */
936fd732 2591int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2592{
2593 u32 scontrol;
81952c54 2594 int rc;
1c3fae4d 2595
936fd732 2596 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2597 return rc;
1c3fae4d 2598
936fd732 2599 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2600 return 0;
2601
936fd732 2602 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2603 return rc;
2604
1c3fae4d
TH
2605 return 1;
2606}
2607
452503f9
AC
2608/*
2609 * This mode timing computation functionality is ported over from
2610 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2611 */
2612/*
b352e57d 2613 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2614 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2615 * for UDMA6, which is currently supported only by Maxtor drives.
2616 *
2617 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2618 */
2619
/* Columns: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 * (names per struct ata_timing as used by ata_timing_quantize below;
 * all values in nanoseconds). */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	/* 0xFF terminates the table (see ata_timing_find_mode) */
	{ 0xFF }
};
2656
/* ENOUGH: round v up to a whole number of 'unit' clocks; EZ keeps 0 as 0
 * ("not specified").  NOTE: both macros evaluate their arguments more
 * than once - pass side-effect-free expressions only. */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)

/* Convert timings in t (nanoseconds) to clock counts in q, using cycle
 * time T for PIO/DMA fields and UT for the UDMA field. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2671
2672void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2673 struct ata_timing *m, unsigned int what)
2674{
2675 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2676 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2677 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2678 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2679 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2680 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2681 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2682 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2683}
2684
2685static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2686{
2687 const struct ata_timing *t;
2688
2689 for (t = ata_timing; t->mode != speed; t++)
91190758 2690 if (t->mode == 0xFF)
452503f9 2691 return NULL;
2e9edbf8 2692 return t;
452503f9
AC
2693}
2694
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2765
cf176e1a
TH
2766/**
2767 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2768 * @dev: Device to adjust xfer masks
458337db 2769 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2770 *
2771 * Adjust xfer masks of @dev downward. Note that this function
2772 * does not apply the change. Invoking ata_set_mode() afterwards
2773 * will apply the limit.
2774 *
2775 * LOCKING:
2776 * Inherited from caller.
2777 *
2778 * RETURNS:
2779 * 0 on success, negative errno on failure
2780 */
458337db 2781int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2782{
458337db
TH
2783 char buf[32];
2784 unsigned int orig_mask, xfer_mask;
2785 unsigned int pio_mask, mwdma_mask, udma_mask;
2786 int quiet, highbit;
cf176e1a 2787
458337db
TH
2788 quiet = !!(sel & ATA_DNXFER_QUIET);
2789 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2790
458337db
TH
2791 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2792 dev->mwdma_mask,
2793 dev->udma_mask);
2794 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2795
458337db
TH
2796 switch (sel) {
2797 case ATA_DNXFER_PIO:
2798 highbit = fls(pio_mask) - 1;
2799 pio_mask &= ~(1 << highbit);
2800 break;
2801
2802 case ATA_DNXFER_DMA:
2803 if (udma_mask) {
2804 highbit = fls(udma_mask) - 1;
2805 udma_mask &= ~(1 << highbit);
2806 if (!udma_mask)
2807 return -ENOENT;
2808 } else if (mwdma_mask) {
2809 highbit = fls(mwdma_mask) - 1;
2810 mwdma_mask &= ~(1 << highbit);
2811 if (!mwdma_mask)
2812 return -ENOENT;
2813 }
2814 break;
2815
2816 case ATA_DNXFER_40C:
2817 udma_mask &= ATA_UDMA_MASK_40C;
2818 break;
2819
2820 case ATA_DNXFER_FORCE_PIO0:
2821 pio_mask &= 1;
2822 case ATA_DNXFER_FORCE_PIO:
2823 mwdma_mask = 0;
2824 udma_mask = 0;
2825 break;
2826
458337db
TH
2827 default:
2828 BUG();
2829 }
2830
2831 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2832
2833 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2834 return -ENOENT;
2835
2836 if (!quiet) {
2837 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2838 snprintf(buf, sizeof(buf), "%s:%s",
2839 ata_mode_string(xfer_mask),
2840 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2841 else
2842 snprintf(buf, sizeof(buf), "%s",
2843 ata_mode_string(xfer_mask));
2844
2845 ata_dev_printk(dev, KERN_WARNING,
2846 "limiting speed to %s\n", buf);
2847 }
cf176e1a
TH
2848
2849 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2850 &dev->udma_mask);
2851
cf176e1a 2852 return 0;
cf176e1a
TH
2853}
2854
3373efd8 2855static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2856{
9af5c9c9 2857 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2858 unsigned int err_mask;
2859 int rc;
1da177e4 2860
e8384607 2861 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2862 if (dev->xfer_shift == ATA_SHIFT_PIO)
2863 dev->flags |= ATA_DFLAG_PIO;
2864
3373efd8 2865 err_mask = ata_dev_set_xfermode(dev);
11750a40
A
2866 /* Old CFA may refuse this command, which is just fine */
2867 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2868 err_mask &= ~AC_ERR_DEV;
0bc2a79a
AC
2869 /* Some very old devices and some bad newer ones fail any kind of
2870 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2871 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2872 dev->pio_mode <= XFER_PIO_2)
2873 err_mask &= ~AC_ERR_DEV;
83206a29 2874 if (err_mask) {
f15a1daf
TH
2875 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2876 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2877 return -EIO;
2878 }
1da177e4 2879
baa1e78a 2880 ehc->i.flags |= ATA_EHI_POST_SETMODE;
422c9daa 2881 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
baa1e78a 2882 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2883 if (rc)
83206a29 2884 return rc;
48a8a14f 2885
23e71c3d
TH
2886 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2887 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2888
f15a1daf
TH
2889 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2890 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2891 return 0;
1da177e4
LT
2892}
2893
1da177e4 2894/**
04351821 2895 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2896 * @link: link on which timings will be programmed
e82cbdb9 2897 * @r_failed_dev: out paramter for failed device
1da177e4 2898 *
04351821
A
2899 * Standard implementation of the function used to tune and set
2900 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2901 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2902 * returned in @r_failed_dev.
780a87f7 2903 *
1da177e4 2904 * LOCKING:
0cba632b 2905 * PCI/etc. bus probe sem.
e82cbdb9
TH
2906 *
2907 * RETURNS:
2908 * 0 on success, negative errno otherwise
1da177e4 2909 */
04351821 2910
0260731f 2911int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2912{
0260731f 2913 struct ata_port *ap = link->ap;
e8e0619f 2914 struct ata_device *dev;
f58229f8 2915 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2916
a6d5a51c 2917 /* step 1: calculate xfer_mask */
f58229f8 2918 ata_link_for_each_dev(dev, link) {
acf356b1 2919 unsigned int pio_mask, dma_mask;
a6d5a51c 2920
e1211e3f 2921 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2922 continue;
2923
3373efd8 2924 ata_dev_xfermask(dev);
1da177e4 2925
acf356b1
TH
2926 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2927 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2928 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2929 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2930
4f65977d 2931 found = 1;
5444a6f4
AC
2932 if (dev->dma_mode)
2933 used_dma = 1;
a6d5a51c 2934 }
4f65977d 2935 if (!found)
e82cbdb9 2936 goto out;
a6d5a51c
TH
2937
2938 /* step 2: always set host PIO timings */
f58229f8 2939 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2940 if (!ata_dev_enabled(dev))
2941 continue;
2942
2943 if (!dev->pio_mode) {
f15a1daf 2944 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2945 rc = -EINVAL;
e82cbdb9 2946 goto out;
e8e0619f
TH
2947 }
2948
2949 dev->xfer_mode = dev->pio_mode;
2950 dev->xfer_shift = ATA_SHIFT_PIO;
2951 if (ap->ops->set_piomode)
2952 ap->ops->set_piomode(ap, dev);
2953 }
1da177e4 2954
a6d5a51c 2955 /* step 3: set host DMA timings */
f58229f8 2956 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2957 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2958 continue;
2959
2960 dev->xfer_mode = dev->dma_mode;
2961 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2962 if (ap->ops->set_dmamode)
2963 ap->ops->set_dmamode(ap, dev);
2964 }
1da177e4
LT
2965
2966 /* step 4: update devices' xfer mode */
f58229f8 2967 ata_link_for_each_dev(dev, link) {
18d90deb 2968 /* don't update suspended devices' xfer mode */
9666f400 2969 if (!ata_dev_enabled(dev))
83206a29
TH
2970 continue;
2971
3373efd8 2972 rc = ata_dev_set_mode(dev);
5bbc53f4 2973 if (rc)
e82cbdb9 2974 goto out;
83206a29 2975 }
1da177e4 2976
e8e0619f
TH
2977 /* Record simplex status. If we selected DMA then the other
2978 * host channels are not permitted to do so.
5444a6f4 2979 */
cca3974e 2980 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2981 ap->host->simplex_claimed = ap;
5444a6f4 2982
e82cbdb9
TH
2983 out:
2984 if (rc)
2985 *r_failed_dev = dev;
2986 return rc;
1da177e4
LT
2987}
2988
04351821
A
2989/**
2990 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2991 * @link: link on which timings will be programmed
04351821
A
2992 * @r_failed_dev: out paramter for failed device
2993 *
2994 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2995 * ata_set_mode() fails, pointer to the failing device is
2996 * returned in @r_failed_dev.
2997 *
2998 * LOCKING:
2999 * PCI/etc. bus probe sem.
3000 *
3001 * RETURNS:
3002 * 0 on success, negative errno otherwise
3003 */
0260731f 3004int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3005{
0260731f
TH
3006 struct ata_port *ap = link->ap;
3007
04351821
A
3008 /* has private set_mode? */
3009 if (ap->ops->set_mode)
0260731f
TH
3010 return ap->ops->set_mode(link, r_failed_dev);
3011 return ata_do_set_mode(link, r_failed_dev);
04351821
A
3012}
3013
1fdffbce
JG
3014/**
3015 * ata_tf_to_host - issue ATA taskfile to host controller
3016 * @ap: port to which command is being issued
3017 * @tf: ATA taskfile register set
3018 *
3019 * Issues ATA taskfile register set to ATA host controller,
3020 * with proper synchronization with interrupt handler and
3021 * other threads.
3022 *
3023 * LOCKING:
cca3974e 3024 * spin_lock_irqsave(host lock)
1fdffbce
JG
3025 */
3026
3027static inline void ata_tf_to_host(struct ata_port *ap,
3028 const struct ata_taskfile *tf)
3029{
3030 ap->ops->tf_load(ap, tf);
3031 ap->ops->exec_command(ap, tf);
3032}
3033
1da177e4
LT
3034/**
3035 * ata_busy_sleep - sleep until BSY clears, or timeout
3036 * @ap: port containing status register to be polled
3037 * @tmout_pat: impatience timeout
3038 * @tmout: overall timeout
3039 *
780a87f7
JG
3040 * Sleep until ATA Status register bit BSY clears,
3041 * or a timeout occurs.
3042 *
d1adc1bb
TH
3043 * LOCKING:
3044 * Kernel thread context (may sleep).
3045 *
3046 * RETURNS:
3047 * 0 on success, -errno otherwise.
1da177e4 3048 */
d1adc1bb
TH
3049int ata_busy_sleep(struct ata_port *ap,
3050 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3051{
3052 unsigned long timer_start, timeout;
3053 u8 status;
3054
3055 status = ata_busy_wait(ap, ATA_BUSY, 300);
3056 timer_start = jiffies;
3057 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3058 while (status != 0xff && (status & ATA_BUSY) &&
3059 time_before(jiffies, timeout)) {
1da177e4
LT
3060 msleep(50);
3061 status = ata_busy_wait(ap, ATA_BUSY, 3);
3062 }
3063
d1adc1bb 3064 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3065 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3066 "port is slow to respond, please be patient "
3067 "(Status 0x%x)\n", status);
1da177e4
LT
3068
3069 timeout = timer_start + tmout;
d1adc1bb
TH
3070 while (status != 0xff && (status & ATA_BUSY) &&
3071 time_before(jiffies, timeout)) {
1da177e4
LT
3072 msleep(50);
3073 status = ata_chk_status(ap);
3074 }
3075
d1adc1bb
TH
3076 if (status == 0xff)
3077 return -ENODEV;
3078
1da177e4 3079 if (status & ATA_BUSY) {
f15a1daf 3080 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3081 "(%lu secs, Status 0x%x)\n",
3082 tmout / HZ, status);
d1adc1bb 3083 return -EBUSY;
1da177e4
LT
3084 }
3085
3086 return 0;
3087}
3088
d4b2bab4
TH
3089/**
3090 * ata_wait_ready - sleep until BSY clears, or timeout
3091 * @ap: port containing status register to be polled
3092 * @deadline: deadline jiffies for the operation
3093 *
3094 * Sleep until ATA Status register bit BSY clears, or timeout
3095 * occurs.
3096 *
3097 * LOCKING:
3098 * Kernel thread context (may sleep).
3099 *
3100 * RETURNS:
3101 * 0 on success, -errno otherwise.
3102 */
3103int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3104{
3105 unsigned long start = jiffies;
3106 int warned = 0;
3107
3108 while (1) {
3109 u8 status = ata_chk_status(ap);
3110 unsigned long now = jiffies;
3111
3112 if (!(status & ATA_BUSY))
3113 return 0;
936fd732 3114 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3115 return -ENODEV;
3116 if (time_after(now, deadline))
3117 return -EBUSY;
3118
3119 if (!warned && time_after(now, start + 5 * HZ) &&
3120 (deadline - now > 3 * HZ)) {
3121 ata_port_printk(ap, KERN_WARNING,
3122 "port is slow to respond, please be patient "
3123 "(Status 0x%x)\n", status);
3124 warned = 1;
3125 }
3126
3127 msleep(50);
3128 }
3129}
3130
3131static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3132 unsigned long deadline)
1da177e4
LT
3133{
3134 struct ata_ioports *ioaddr = &ap->ioaddr;
3135 unsigned int dev0 = devmask & (1 << 0);
3136 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3137 int rc, ret = 0;
1da177e4
LT
3138
3139 /* if device 0 was found in ata_devchk, wait for its
3140 * BSY bit to clear
3141 */
d4b2bab4
TH
3142 if (dev0) {
3143 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3144 if (rc) {
3145 if (rc != -ENODEV)
3146 return rc;
3147 ret = rc;
3148 }
d4b2bab4 3149 }
1da177e4 3150
e141d999
TH
3151 /* if device 1 was found in ata_devchk, wait for register
3152 * access briefly, then wait for BSY to clear.
1da177e4 3153 */
e141d999
TH
3154 if (dev1) {
3155 int i;
1da177e4
LT
3156
3157 ap->ops->dev_select(ap, 1);
e141d999
TH
3158
3159 /* Wait for register access. Some ATAPI devices fail
3160 * to set nsect/lbal after reset, so don't waste too
3161 * much time on it. We're gonna wait for !BSY anyway.
3162 */
3163 for (i = 0; i < 2; i++) {
3164 u8 nsect, lbal;
3165
3166 nsect = ioread8(ioaddr->nsect_addr);
3167 lbal = ioread8(ioaddr->lbal_addr);
3168 if ((nsect == 1) && (lbal == 1))
3169 break;
3170 msleep(50); /* give drive a breather */
3171 }
3172
d4b2bab4 3173 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3174 if (rc) {
3175 if (rc != -ENODEV)
3176 return rc;
3177 ret = rc;
3178 }
d4b2bab4 3179 }
1da177e4
LT
3180
3181 /* is all this really necessary? */
3182 ap->ops->dev_select(ap, 0);
3183 if (dev1)
3184 ap->ops->dev_select(ap, 1);
3185 if (dev0)
3186 ap->ops->dev_select(ap, 0);
d4b2bab4 3187
9b89391c 3188 return ret;
1da177e4
LT
3189}
3190
d4b2bab4
TH
3191static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3192 unsigned long deadline)
1da177e4
LT
3193{
3194 struct ata_ioports *ioaddr = &ap->ioaddr;
3195
44877b4e 3196 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3197
3198 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3199 iowrite8(ap->ctl, ioaddr->ctl_addr);
3200 udelay(20); /* FIXME: flush */
3201 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3202 udelay(20); /* FIXME: flush */
3203 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3204
3205 /* spec mandates ">= 2ms" before checking status.
3206 * We wait 150ms, because that was the magic delay used for
3207 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3208 * between when the ATA command register is written, and then
3209 * status is checked. Because waiting for "a while" before
3210 * checking status is fine, post SRST, we perform this magic
3211 * delay here as well.
09c7ad79
AC
3212 *
3213 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3214 */
3215 msleep(150);
3216
2e9edbf8 3217 /* Before we perform post reset processing we want to see if
298a41ca
TH
3218 * the bus shows 0xFF because the odd clown forgets the D7
3219 * pulldown resistor.
3220 */
d1adc1bb 3221 if (ata_check_status(ap) == 0xFF)
9b89391c 3222 return -ENODEV;
09c7ad79 3223
d4b2bab4 3224 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3225}
3226
3227/**
3228 * ata_bus_reset - reset host port and associated ATA channel
3229 * @ap: port to reset
3230 *
3231 * This is typically the first time we actually start issuing
3232 * commands to the ATA channel. We wait for BSY to clear, then
3233 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3234 * result. Determine what devices, if any, are on the channel
3235 * by looking at the device 0/1 error register. Look at the signature
3236 * stored in each device's taskfile registers, to determine if
3237 * the device is ATA or ATAPI.
3238 *
3239 * LOCKING:
0cba632b 3240 * PCI/etc. bus probe sem.
cca3974e 3241 * Obtains host lock.
1da177e4
LT
3242 *
3243 * SIDE EFFECTS:
198e0fed 3244 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3245 */
3246
3247void ata_bus_reset(struct ata_port *ap)
3248{
9af5c9c9 3249 struct ata_device *device = ap->link.device;
1da177e4
LT
3250 struct ata_ioports *ioaddr = &ap->ioaddr;
3251 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3252 u8 err;
aec5c3c1 3253 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3254 int rc;
1da177e4 3255
44877b4e 3256 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3257
3258 /* determine if device 0/1 are present */
3259 if (ap->flags & ATA_FLAG_SATA_RESET)
3260 dev0 = 1;
3261 else {
3262 dev0 = ata_devchk(ap, 0);
3263 if (slave_possible)
3264 dev1 = ata_devchk(ap, 1);
3265 }
3266
3267 if (dev0)
3268 devmask |= (1 << 0);
3269 if (dev1)
3270 devmask |= (1 << 1);
3271
3272 /* select device 0 again */
3273 ap->ops->dev_select(ap, 0);
3274
3275 /* issue bus reset */
9b89391c
TH
3276 if (ap->flags & ATA_FLAG_SRST) {
3277 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3278 if (rc && rc != -ENODEV)
aec5c3c1 3279 goto err_out;
9b89391c 3280 }
1da177e4
LT
3281
3282 /*
3283 * determine by signature whether we have ATA or ATAPI devices
3284 */
3f19859e 3285 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3286 if ((slave_possible) && (err != 0x81))
3f19859e 3287 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3288
1da177e4 3289 /* is double-select really necessary? */
9af5c9c9 3290 if (device[1].class != ATA_DEV_NONE)
1da177e4 3291 ap->ops->dev_select(ap, 1);
9af5c9c9 3292 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3293 ap->ops->dev_select(ap, 0);
3294
3295 /* if no devices were detected, disable this port */
9af5c9c9
TH
3296 if ((device[0].class == ATA_DEV_NONE) &&
3297 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3298 goto err_out;
3299
3300 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3301 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3302 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3303 }
3304
3305 DPRINTK("EXIT\n");
3306 return;
3307
3308err_out:
f15a1daf 3309 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3310 ata_port_disable(ap);
1da177e4
LT
3311
3312 DPRINTK("EXIT\n");
3313}
3314
d7bb4cc7 3315/**
936fd732
TH
3316 * sata_link_debounce - debounce SATA phy status
3317 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3318 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3319 * @deadline: deadline jiffies for the operation
d7bb4cc7 3320 *
936fd732 3321* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3322 * holding the same value where DET is not 1 for @duration polled
3323 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3324 * beginning of the stable state. Because DET gets stuck at 1 on
3325 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3326 * until timeout then returns 0 if DET is stable at 1.
3327 *
d4b2bab4
TH
3328 * @timeout is further limited by @deadline. The sooner of the
3329 * two is used.
3330 *
d7bb4cc7
TH
3331 * LOCKING:
3332 * Kernel thread context (may sleep)
3333 *
3334 * RETURNS:
3335 * 0 on success, -errno on failure.
3336 */
936fd732
TH
3337int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3338 unsigned long deadline)
7a7921e8 3339{
d7bb4cc7 3340 unsigned long interval_msec = params[0];
d4b2bab4
TH
3341 unsigned long duration = msecs_to_jiffies(params[1]);
3342 unsigned long last_jiffies, t;
d7bb4cc7
TH
3343 u32 last, cur;
3344 int rc;
3345
d4b2bab4
TH
3346 t = jiffies + msecs_to_jiffies(params[2]);
3347 if (time_before(t, deadline))
3348 deadline = t;
3349
936fd732 3350 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3351 return rc;
3352 cur &= 0xf;
3353
3354 last = cur;
3355 last_jiffies = jiffies;
3356
3357 while (1) {
3358 msleep(interval_msec);
936fd732 3359 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3360 return rc;
3361 cur &= 0xf;
3362
3363 /* DET stable? */
3364 if (cur == last) {
d4b2bab4 3365 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3366 continue;
3367 if (time_after(jiffies, last_jiffies + duration))
3368 return 0;
3369 continue;
3370 }
3371
3372 /* unstable, start over */
3373 last = cur;
3374 last_jiffies = jiffies;
3375
f1545154
TH
3376 /* Check deadline. If debouncing failed, return
3377 * -EPIPE to tell upper layer to lower link speed.
3378 */
d4b2bab4 3379 if (time_after(jiffies, deadline))
f1545154 3380 return -EPIPE;
d7bb4cc7
TH
3381 }
3382}
3383
3384/**
936fd732
TH
3385 * sata_link_resume - resume SATA link
3386 * @link: ATA link to resume SATA
d7bb4cc7 3387 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3388 * @deadline: deadline jiffies for the operation
d7bb4cc7 3389 *
936fd732 3390 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3391 *
3392 * LOCKING:
3393 * Kernel thread context (may sleep)
3394 *
3395 * RETURNS:
3396 * 0 on success, -errno on failure.
3397 */
936fd732
TH
3398int sata_link_resume(struct ata_link *link, const unsigned long *params,
3399 unsigned long deadline)
d7bb4cc7
TH
3400{
3401 u32 scontrol;
81952c54
TH
3402 int rc;
3403
936fd732 3404 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3405 return rc;
7a7921e8 3406
852ee16a 3407 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3408
936fd732 3409 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3410 return rc;
7a7921e8 3411
d7bb4cc7
TH
3412 /* Some PHYs react badly if SStatus is pounded immediately
3413 * after resuming. Delay 200ms before debouncing.
3414 */
3415 msleep(200);
7a7921e8 3416
936fd732 3417 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3418}
3419
f5914a46
TH
3420/**
3421 * ata_std_prereset - prepare for reset
cc0680a5 3422 * @link: ATA link to be reset
d4b2bab4 3423 * @deadline: deadline jiffies for the operation
f5914a46 3424 *
cc0680a5 3425 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3426 * prereset makes libata abort whole reset sequence and give up
3427 * that port, so prereset should be best-effort. It does its
3428 * best to prepare for reset sequence but if things go wrong, it
3429 * should just whine, not fail.
f5914a46
TH
3430 *
3431 * LOCKING:
3432 * Kernel thread context (may sleep)
3433 *
3434 * RETURNS:
3435 * 0 on success, -errno otherwise.
3436 */
cc0680a5 3437int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3438{
cc0680a5 3439 struct ata_port *ap = link->ap;
936fd732 3440 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3441 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3442 int rc;
3443
31daabda 3444 /* handle link resume */
28324304 3445 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3446 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3447 ehc->i.action |= ATA_EH_HARDRESET;
3448
633273a3
TH
3449 /* Some PMPs don't work with only SRST, force hardreset if PMP
3450 * is supported.
3451 */
3452 if (ap->flags & ATA_FLAG_PMP)
3453 ehc->i.action |= ATA_EH_HARDRESET;
3454
f5914a46
TH
3455 /* if we're about to do hardreset, nothing more to do */
3456 if (ehc->i.action & ATA_EH_HARDRESET)
3457 return 0;
3458
936fd732 3459 /* if SATA, resume link */
a16abc0b 3460 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3461 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3462 /* whine about phy resume failure but proceed */
3463 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3464 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3465 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3466 }
3467
3468 /* Wait for !BSY if the controller can wait for the first D2H
3469 * Reg FIS and we don't know that no device is attached.
3470 */
0c88758b 3471 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3472 rc = ata_wait_ready(ap, deadline);
6dffaf61 3473 if (rc && rc != -ENODEV) {
cc0680a5 3474 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3475 "(errno=%d), forcing hardreset\n", rc);
3476 ehc->i.action |= ATA_EH_HARDRESET;
3477 }
3478 }
f5914a46
TH
3479
3480 return 0;
3481}
3482
c2bd5804
TH
3483/**
3484 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3485 * @link: ATA link to reset
c2bd5804 3486 * @classes: resulting classes of attached devices
d4b2bab4 3487 * @deadline: deadline jiffies for the operation
c2bd5804 3488 *
52783c5d 3489 * Reset host port using ATA SRST.
c2bd5804
TH
3490 *
3491 * LOCKING:
3492 * Kernel thread context (may sleep)
3493 *
3494 * RETURNS:
3495 * 0 on success, -errno otherwise.
3496 */
cc0680a5 3497int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3498 unsigned long deadline)
c2bd5804 3499{
cc0680a5 3500 struct ata_port *ap = link->ap;
c2bd5804 3501 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3502 unsigned int devmask = 0;
3503 int rc;
c2bd5804
TH
3504 u8 err;
3505
3506 DPRINTK("ENTER\n");
3507
936fd732 3508 if (ata_link_offline(link)) {
3a39746a
TH
3509 classes[0] = ATA_DEV_NONE;
3510 goto out;
3511 }
3512
c2bd5804
TH
3513 /* determine if device 0/1 are present */
3514 if (ata_devchk(ap, 0))
3515 devmask |= (1 << 0);
3516 if (slave_possible && ata_devchk(ap, 1))
3517 devmask |= (1 << 1);
3518
c2bd5804
TH
3519 /* select device 0 again */
3520 ap->ops->dev_select(ap, 0);
3521
3522 /* issue bus reset */
3523 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3524 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3525 /* if link is occupied, -ENODEV too is an error */
936fd732 3526 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3527 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3528 return rc;
c2bd5804
TH
3529 }
3530
3531 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3532 classes[0] = ata_dev_try_classify(&link->device[0],
3533 devmask & (1 << 0), &err);
c2bd5804 3534 if (slave_possible && err != 0x81)
3f19859e
TH
3535 classes[1] = ata_dev_try_classify(&link->device[1],
3536 devmask & (1 << 1), &err);
c2bd5804 3537
3a39746a 3538 out:
c2bd5804
TH
3539 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3540 return 0;
3541}
3542
3543/**
cc0680a5
TH
3544 * sata_link_hardreset - reset link via SATA phy reset
3545 * @link: link to reset
b6103f6d 3546 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3547 * @deadline: deadline jiffies for the operation
c2bd5804 3548 *
cc0680a5 3549 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3550 *
3551 * LOCKING:
3552 * Kernel thread context (may sleep)
3553 *
3554 * RETURNS:
3555 * 0 on success, -errno otherwise.
3556 */
cc0680a5 3557int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3558 unsigned long deadline)
c2bd5804 3559{
852ee16a 3560 u32 scontrol;
81952c54 3561 int rc;
852ee16a 3562
c2bd5804
TH
3563 DPRINTK("ENTER\n");
3564
936fd732 3565 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3566 /* SATA spec says nothing about how to reconfigure
3567 * spd. To be on the safe side, turn off phy during
3568 * reconfiguration. This works for at least ICH7 AHCI
3569 * and Sil3124.
3570 */
936fd732 3571 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3572 goto out;
81952c54 3573
a34b6fc0 3574 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3575
936fd732 3576 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3577 goto out;
1c3fae4d 3578
936fd732 3579 sata_set_spd(link);
1c3fae4d
TH
3580 }
3581
3582 /* issue phy wake/reset */
936fd732 3583 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3584 goto out;
81952c54 3585
852ee16a 3586 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3587
936fd732 3588 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3589 goto out;
c2bd5804 3590
1c3fae4d 3591 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3592 * 10.4.2 says at least 1 ms.
3593 */
3594 msleep(1);
3595
936fd732
TH
3596 /* bring link back */
3597 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3598 out:
3599 DPRINTK("EXIT, rc=%d\n", rc);
3600 return rc;
3601}
3602
3603/**
3604 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3605 * @link: link to reset
b6103f6d 3606 * @class: resulting class of attached device
d4b2bab4 3607 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3608 *
3609 * SATA phy-reset host port using DET bits of SControl register,
3610 * wait for !BSY and classify the attached device.
3611 *
3612 * LOCKING:
3613 * Kernel thread context (may sleep)
3614 *
3615 * RETURNS:
3616 * 0 on success, -errno otherwise.
3617 */
cc0680a5 3618int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3619 unsigned long deadline)
b6103f6d 3620{
cc0680a5 3621 struct ata_port *ap = link->ap;
936fd732 3622 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3623 int rc;
3624
3625 DPRINTK("ENTER\n");
3626
3627 /* do hardreset */
cc0680a5 3628 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3629 if (rc) {
cc0680a5 3630 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3631 "COMRESET failed (errno=%d)\n", rc);
3632 return rc;
3633 }
c2bd5804 3634
c2bd5804 3635 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3636 if (ata_link_offline(link)) {
c2bd5804
TH
3637 *class = ATA_DEV_NONE;
3638 DPRINTK("EXIT, link offline\n");
3639 return 0;
3640 }
3641
34fee227
TH
3642 /* wait a while before checking status, see SRST for more info */
3643 msleep(150);
3644
633273a3
TH
3645 /* If PMP is supported, we have to do follow-up SRST. Note
3646 * that some PMPs don't send D2H Reg FIS after hardreset at
3647 * all if the first port is empty. Wait for it just for a
3648 * second and request follow-up SRST.
3649 */
3650 if (ap->flags & ATA_FLAG_PMP) {
3651 ata_wait_ready(ap, jiffies + HZ);
3652 return -EAGAIN;
3653 }
3654
d4b2bab4 3655 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3656 /* link occupied, -ENODEV too is an error */
3657 if (rc) {
cc0680a5 3658 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3659 "COMRESET failed (errno=%d)\n", rc);
3660 return rc;
c2bd5804
TH
3661 }
3662
3a39746a
TH
3663 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3664
3f19859e 3665 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3666
3667 DPRINTK("EXIT, class=%u\n", *class);
3668 return 0;
3669}
3670
3671/**
3672 * ata_std_postreset - standard postreset callback
cc0680a5 3673 * @link: the target ata_link
c2bd5804
TH
3674 * @classes: classes of attached devices
3675 *
3676 * This function is invoked after a successful reset. Note that
3677 * the device might have been reset more than once using
3678 * different reset methods before postreset is invoked.
c2bd5804 3679 *
c2bd5804
TH
3680 * LOCKING:
3681 * Kernel thread context (may sleep)
3682 */
cc0680a5 3683void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3684{
cc0680a5 3685 struct ata_port *ap = link->ap;
dc2b3515
TH
3686 u32 serror;
3687
c2bd5804
TH
3688 DPRINTK("ENTER\n");
3689
c2bd5804 3690 /* print link status */
936fd732 3691 sata_print_link_status(link);
c2bd5804 3692
dc2b3515 3693 /* clear SError */
936fd732
TH
3694 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3695 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3696
c2bd5804
TH
3697 /* is double-select really necessary? */
3698 if (classes[0] != ATA_DEV_NONE)
3699 ap->ops->dev_select(ap, 1);
3700 if (classes[1] != ATA_DEV_NONE)
3701 ap->ops->dev_select(ap, 0);
3702
3a39746a
TH
3703 /* bail out if no device is present */
3704 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3705 DPRINTK("EXIT, no device\n");
3706 return;
3707 }
3708
3709 /* set up device control */
0d5ff566
TH
3710 if (ap->ioaddr.ctl_addr)
3711 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3712
3713 DPRINTK("EXIT\n");
3714}
3715
623a3128
TH
3716/**
3717 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3718 * @dev: device to compare against
3719 * @new_class: class of the new device
3720 * @new_id: IDENTIFY page of the new device
3721 *
3722 * Compare @new_class and @new_id against @dev and determine
3723 * whether @dev is the device indicated by @new_class and
3724 * @new_id.
3725 *
3726 * LOCKING:
3727 * None.
3728 *
3729 * RETURNS:
3730 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3731 */
3373efd8
TH
3732static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3733 const u16 *new_id)
623a3128
TH
3734{
3735 const u16 *old_id = dev->id;
a0cf733b
TH
3736 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3737 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3738
3739 if (dev->class != new_class) {
f15a1daf
TH
3740 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3741 dev->class, new_class);
623a3128
TH
3742 return 0;
3743 }
3744
a0cf733b
TH
3745 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3746 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3747 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3748 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3749
3750 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3751 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3752 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3753 return 0;
3754 }
3755
3756 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3757 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3758 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3759 return 0;
3760 }
3761
623a3128
TH
3762 return 1;
3763}
3764
3765/**
fe30911b 3766 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3767 * @dev: target ATA device
bff04647 3768 * @readid_flags: read ID flags
623a3128
TH
3769 *
3770 * Re-read IDENTIFY page and make sure @dev is still attached to
3771 * the port.
3772 *
3773 * LOCKING:
3774 * Kernel thread context (may sleep)
3775 *
3776 * RETURNS:
3777 * 0 on success, negative errno otherwise
3778 */
fe30911b 3779int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3780{
5eb45c02 3781 unsigned int class = dev->class;
9af5c9c9 3782 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3783 int rc;
3784
fe635c7e 3785 /* read ID data */
bff04647 3786 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3787 if (rc)
fe30911b 3788 return rc;
623a3128
TH
3789
3790 /* is the device still there? */
fe30911b
TH
3791 if (!ata_dev_same_device(dev, class, id))
3792 return -ENODEV;
623a3128 3793
fe635c7e 3794 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3795 return 0;
3796}
3797
3798/**
3799 * ata_dev_revalidate - Revalidate ATA device
3800 * @dev: device to revalidate
422c9daa 3801 * @new_class: new class code
fe30911b
TH
3802 * @readid_flags: read ID flags
3803 *
3804 * Re-read IDENTIFY page, make sure @dev is still attached to the
3805 * port and reconfigure it according to the new IDENTIFY page.
3806 *
3807 * LOCKING:
3808 * Kernel thread context (may sleep)
3809 *
3810 * RETURNS:
3811 * 0 on success, negative errno otherwise
3812 */
422c9daa
TH
3813int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3814 unsigned int readid_flags)
fe30911b 3815{
6ddcd3b0 3816 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3817 int rc;
3818
3819 if (!ata_dev_enabled(dev))
3820 return -ENODEV;
3821
422c9daa
TH
3822 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3823 if (ata_class_enabled(new_class) &&
3824 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3825 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3826 dev->class, new_class);
3827 rc = -ENODEV;
3828 goto fail;
3829 }
3830
fe30911b
TH
3831 /* re-read ID */
3832 rc = ata_dev_reread_id(dev, readid_flags);
3833 if (rc)
3834 goto fail;
623a3128
TH
3835
3836 /* configure device according to the new ID */
efdaedc4 3837 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3838 if (rc)
3839 goto fail;
3840
3841 /* verify n_sectors hasn't changed */
b54eebd6
TH
3842 if (dev->class == ATA_DEV_ATA && n_sectors &&
3843 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3844 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3845 "%llu != %llu\n",
3846 (unsigned long long)n_sectors,
3847 (unsigned long long)dev->n_sectors);
8270bec4
TH
3848
3849 /* restore original n_sectors */
3850 dev->n_sectors = n_sectors;
3851
6ddcd3b0
TH
3852 rc = -ENODEV;
3853 goto fail;
3854 }
3855
3856 return 0;
623a3128
TH
3857
3858 fail:
f15a1daf 3859 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3860 return rc;
3861}
3862
6919a0a6
AC
3863struct ata_blacklist_entry {
3864 const char *model_num;
3865 const char *model_rev;
3866 unsigned long horkage;
3867};
3868
3869static const struct ata_blacklist_entry ata_device_blacklist [] = {
3870 /* Devices with DMA related problems under Linux */
3871 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3872 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3873 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3874 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3875 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3876 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3877 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3878 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3879 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3880 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3881 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3882 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3883 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3884 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3885 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3886 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3887 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3888 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3889 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3890 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3891 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3892 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3893 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3894 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3895 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3896 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3897 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3898 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3899 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3900 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3901 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3902 { "IOMEGA ZIP 250 ATAPI Floppy",
3903 NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
3904 /* Odd clown on sil3726/4726 PMPs */
3905 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3906 ATA_HORKAGE_SKIP_PM },
6919a0a6 3907
18d6e9d5 3908 /* Weird ATAPI devices */
40a1d531 3909 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3910
6919a0a6
AC
3911 /* Devices we expect to fail diagnostics */
3912
3913 /* Devices where NCQ should be avoided */
3914 /* NCQ is slow */
3915 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3916 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3917 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3918 /* NCQ is broken */
539cc7c7 3919 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3920 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
0b0a43e0
DM
3921 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
3922 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
539cc7c7 3923
36e337d0
RH
3924 /* Blacklist entries taken from Silicon Image 3124/3132
3925 Windows driver .inf file - also several Linux problem reports */
3926 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3927 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3928 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3929 /* Drives which do spurious command completion */
3930 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3931 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
e14cbfa6 3932 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3933 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
7f567620 3934 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
a520f261 3935 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
7f567620 3936 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3fb6589c 3937 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
7f567620
TH
3938 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
3939 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
5d6aca8d 3940 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3941
16c55b03
TH
3942 /* devices which puke on READ_NATIVE_MAX */
3943 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3944 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3945 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3946 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 3947
93328e11
AC
3948 /* Devices which report 1 sector over size HPA */
3949 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3950 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3951
6919a0a6
AC
3952 /* End Marker */
3953 { }
1da177e4 3954};
2e9edbf8 3955
539cc7c7
JG
/**
 *	strn_pattern_cmp - match a device string against a blacklist pattern
 *	@patt: pattern, optionally ending in @wildchar for a prefix match
 *	@name: device-supplied string (model number / firmware revision)
 *	@wildchar: wildcard character, '*' for the blacklist table
 *
 *	A trailing "@wildchar" in @patt makes the comparison a prefix
 *	match; otherwise the strings must match exactly.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp convention).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/*
	 * Check for a trailing wildcard ("*\0") and compare only the
	 * prefix that precedes it.
	 */
	p = strchr(patt, wildchar);
	if (p && p[1] == '\0')
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  The old code compared
	 * only strlen(name) characters, which falsely matched whenever
	 * the pattern was longer than the device-reported name.
	 */
	return strcmp(patt, name);
}
3972
75683fe7 3973static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3974{
8bfa79fc
TH
3975 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3976 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3977 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3978
8bfa79fc
TH
3979 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3980 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3981
6919a0a6 3982 while (ad->model_num) {
539cc7c7 3983 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3984 if (ad->model_rev == NULL)
3985 return ad->horkage;
539cc7c7 3986 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 3987 return ad->horkage;
f4b15fef 3988 }
6919a0a6 3989 ad++;
f4b15fef 3990 }
1da177e4
LT
3991 return 0;
3992}
3993
6919a0a6
AC
3994static int ata_dma_blacklisted(const struct ata_device *dev)
3995{
3996 /* We don't support polling DMA.
3997 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3998 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3999 */
9af5c9c9 4000 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4001 (dev->flags & ATA_DFLAG_CDB_INTR))
4002 return 1;
75683fe7 4003 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4004}
4005
a6d5a51c
TH
4006/**
4007 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4008 * @dev: Device to compute xfermask for
4009 *
acf356b1
TH
4010 * Compute supported xfermask of @dev and store it in
4011 * dev->*_mask. This function is responsible for applying all
4012 * known limits including host controller limits, device
4013 * blacklist, etc...
a6d5a51c
TH
4014 *
4015 * LOCKING:
4016 * None.
a6d5a51c 4017 */
3373efd8 4018static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4019{
9af5c9c9
TH
4020 struct ata_link *link = dev->link;
4021 struct ata_port *ap = link->ap;
cca3974e 4022 struct ata_host *host = ap->host;
a6d5a51c 4023 unsigned long xfer_mask;
1da177e4 4024
37deecb5 4025 /* controller modes available */
565083e1
TH
4026 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4027 ap->mwdma_mask, ap->udma_mask);
4028
8343f889 4029 /* drive modes available */
37deecb5
TH
4030 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4031 dev->mwdma_mask, dev->udma_mask);
4032 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4033
b352e57d
AC
4034 /*
4035 * CFA Advanced TrueIDE timings are not allowed on a shared
4036 * cable
4037 */
4038 if (ata_dev_pair(dev)) {
4039 /* No PIO5 or PIO6 */
4040 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4041 /* No MWDMA3 or MWDMA 4 */
4042 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4043 }
4044
37deecb5
TH
4045 if (ata_dma_blacklisted(dev)) {
4046 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4047 ata_dev_printk(dev, KERN_WARNING,
4048 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4049 }
a6d5a51c 4050
14d66ab7
PV
4051 if ((host->flags & ATA_HOST_SIMPLEX) &&
4052 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4053 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4054 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4055 "other device, disabling DMA\n");
5444a6f4 4056 }
565083e1 4057
e424675f
JG
4058 if (ap->flags & ATA_FLAG_NO_IORDY)
4059 xfer_mask &= ata_pio_mask_no_iordy(dev);
4060
5444a6f4 4061 if (ap->ops->mode_filter)
a76b62ca 4062 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4063
8343f889
RH
4064 /* Apply cable rule here. Don't apply it early because when
4065 * we handle hot plug the cable type can itself change.
4066 * Check this last so that we know if the transfer rate was
4067 * solely limited by the cable.
4068 * Unknown or 80 wire cables reported host side are checked
4069 * drive side as well. Cases where we know a 40wire cable
4070 * is used safely for 80 are not checked here.
4071 */
4072 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4073 /* UDMA/44 or higher would be available */
4074 if((ap->cbl == ATA_CBL_PATA40) ||
4075 (ata_drive_40wire(dev->id) &&
4076 (ap->cbl == ATA_CBL_PATA_UNK ||
4077 ap->cbl == ATA_CBL_PATA80))) {
4078 ata_dev_printk(dev, KERN_WARNING,
4079 "limited to UDMA/33 due to 40-wire cable\n");
4080 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4081 }
4082
565083e1
TH
4083 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4084 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4085}
4086
1da177e4
LT
4087/**
4088 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4089 * @dev: Device to which command will be sent
4090 *
780a87f7
JG
4091 * Issue SET FEATURES - XFER MODE command to device @dev
4092 * on port @ap.
4093 *
1da177e4 4094 * LOCKING:
0cba632b 4095 * PCI/etc. bus probe sem.
83206a29
TH
4096 *
4097 * RETURNS:
4098 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4099 */
4100
3373efd8 4101static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4102{
a0123703 4103 struct ata_taskfile tf;
83206a29 4104 unsigned int err_mask;
1da177e4
LT
4105
4106 /* set up set-features taskfile */
4107 DPRINTK("set features - xfer mode\n");
4108
464cf177
TH
4109 /* Some controllers and ATAPI devices show flaky interrupt
4110 * behavior after setting xfer mode. Use polling instead.
4111 */
3373efd8 4112 ata_tf_init(dev, &tf);
a0123703
TH
4113 tf.command = ATA_CMD_SET_FEATURES;
4114 tf.feature = SETFEATURES_XFER;
464cf177 4115 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4116 tf.protocol = ATA_PROT_NODATA;
4117 tf.nsect = dev->xfer_mode;
1da177e4 4118
3373efd8 4119 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4120
4121 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4122 return err_mask;
4123}
4124
4125/**
4126 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4127 * @dev: Device to which command will be sent
4128 * @enable: Whether to enable or disable the feature
4129 *
4130 * Issue SET FEATURES - SATA FEATURES command to device @dev
4131 * on port @ap with sector count set to indicate Asynchronous
4132 * Notification feature
4133 *
4134 * LOCKING:
4135 * PCI/etc. bus probe sem.
4136 *
4137 * RETURNS:
4138 * 0 on success, AC_ERR_* mask otherwise.
4139 */
4140static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4141{
4142 struct ata_taskfile tf;
4143 unsigned int err_mask;
4144
4145 /* set up set-features taskfile */
4146 DPRINTK("set features - SATA features\n");
4147
4148 ata_tf_init(dev, &tf);
4149 tf.command = ATA_CMD_SET_FEATURES;
4150 tf.feature = enable;
4151 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4152 tf.protocol = ATA_PROT_NODATA;
4153 tf.nsect = SATA_AN;
4154
4155 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4156
83206a29
TH
4157 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4158 return err_mask;
1da177e4
LT
4159}
4160
8bf62ece
AL
4161/**
4162 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4163 * @dev: Device to which command will be sent
e2a7f77a
RD
4164 * @heads: Number of heads (taskfile parameter)
4165 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4166 *
4167 * LOCKING:
6aff8f1f
TH
4168 * Kernel thread context (may sleep)
4169 *
4170 * RETURNS:
4171 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4172 */
3373efd8
TH
4173static unsigned int ata_dev_init_params(struct ata_device *dev,
4174 u16 heads, u16 sectors)
8bf62ece 4175{
a0123703 4176 struct ata_taskfile tf;
6aff8f1f 4177 unsigned int err_mask;
8bf62ece
AL
4178
4179 /* Number of sectors per track 1-255. Number of heads 1-16 */
4180 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4181 return AC_ERR_INVALID;
8bf62ece
AL
4182
4183 /* set up init dev params taskfile */
4184 DPRINTK("init dev params \n");
4185
3373efd8 4186 ata_tf_init(dev, &tf);
a0123703
TH
4187 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4188 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4189 tf.protocol = ATA_PROT_NODATA;
4190 tf.nsect = sectors;
4191 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4192
3373efd8 4193 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4194 /* A clean abort indicates an original or just out of spec drive
4195 and we should continue as we issue the setup based on the
4196 drive reported working geometry */
4197 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4198 err_mask = 0;
8bf62ece 4199
6aff8f1f
TH
4200 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4201 return err_mask;
8bf62ece
AL
4202}
4203
1da177e4 4204/**
0cba632b
JG
4205 * ata_sg_clean - Unmap DMA memory associated with command
4206 * @qc: Command containing DMA memory to be released
4207 *
4208 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4209 *
4210 * LOCKING:
cca3974e 4211 * spin_lock_irqsave(host lock)
1da177e4 4212 */
70e6ad0c 4213void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4214{
4215 struct ata_port *ap = qc->ap;
cedc9a47 4216 struct scatterlist *sg = qc->__sg;
1da177e4 4217 int dir = qc->dma_dir;
cedc9a47 4218 void *pad_buf = NULL;
1da177e4 4219
a4631474
TH
4220 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4221 WARN_ON(sg == NULL);
1da177e4
LT
4222
4223 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4224 WARN_ON(qc->n_elem > 1);
1da177e4 4225
2c13b7ce 4226 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4227
cedc9a47
JG
4228 /* if we padded the buffer out to 32-bit bound, and data
4229 * xfer direction is from-device, we must copy from the
4230 * pad buffer back into the supplied buffer
4231 */
4232 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4233 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4234
4235 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4236 if (qc->n_elem)
2f1f610b 4237 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4238 /* restore last sg */
4239 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4240 if (pad_buf) {
4241 struct scatterlist *psg = &qc->pad_sgent;
4242 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4243 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4244 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4245 }
4246 } else {
2e242fa9 4247 if (qc->n_elem)
2f1f610b 4248 dma_unmap_single(ap->dev,
e1410f2d
JG
4249 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4250 dir);
cedc9a47
JG
4251 /* restore sg */
4252 sg->length += qc->pad_len;
4253 if (pad_buf)
4254 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4255 pad_buf, qc->pad_len);
4256 }
1da177e4
LT
4257
4258 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4259 qc->__sg = NULL;
1da177e4
LT
4260}
4261
4262/**
4263 * ata_fill_sg - Fill PCI IDE PRD table
4264 * @qc: Metadata associated with taskfile to be transferred
4265 *
780a87f7
JG
4266 * Fill PCI IDE PRD (scatter-gather) table with segments
4267 * associated with the current disk command.
4268 *
1da177e4 4269 * LOCKING:
cca3974e 4270 * spin_lock_irqsave(host lock)
1da177e4
LT
4271 *
4272 */
4273static void ata_fill_sg(struct ata_queued_cmd *qc)
4274{
1da177e4 4275 struct ata_port *ap = qc->ap;
cedc9a47
JG
4276 struct scatterlist *sg;
4277 unsigned int idx;
1da177e4 4278
a4631474 4279 WARN_ON(qc->__sg == NULL);
f131883e 4280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4281
4282 idx = 0;
cedc9a47 4283 ata_for_each_sg(sg, qc) {
1da177e4
LT
4284 u32 addr, offset;
4285 u32 sg_len, len;
4286
4287 /* determine if physical DMA addr spans 64K boundary.
4288 * Note h/w doesn't support 64-bit, so we unconditionally
4289 * truncate dma_addr_t to u32.
4290 */
4291 addr = (u32) sg_dma_address(sg);
4292 sg_len = sg_dma_len(sg);
4293
4294 while (sg_len) {
4295 offset = addr & 0xffff;
4296 len = sg_len;
4297 if ((offset + sg_len) > 0x10000)
4298 len = 0x10000 - offset;
4299
4300 ap->prd[idx].addr = cpu_to_le32(addr);
4301 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4302 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4303
4304 idx++;
4305 sg_len -= len;
4306 addr += len;
4307 }
4308 }
4309
4310 if (idx)
4311 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4312}
b9a4197e 4313
d26fc955
AC
4314/**
4315 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4316 * @qc: Metadata associated with taskfile to be transferred
4317 *
4318 * Fill PCI IDE PRD (scatter-gather) table with segments
4319 * associated with the current disk command. Perform the fill
4320 * so that we avoid writing any length 64K records for
4321 * controllers that don't follow the spec.
4322 *
4323 * LOCKING:
4324 * spin_lock_irqsave(host lock)
4325 *
4326 */
4327static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4328{
4329 struct ata_port *ap = qc->ap;
4330 struct scatterlist *sg;
4331 unsigned int idx;
4332
4333 WARN_ON(qc->__sg == NULL);
4334 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4335
4336 idx = 0;
4337 ata_for_each_sg(sg, qc) {
4338 u32 addr, offset;
4339 u32 sg_len, len, blen;
4340
4341 /* determine if physical DMA addr spans 64K boundary.
4342 * Note h/w doesn't support 64-bit, so we unconditionally
4343 * truncate dma_addr_t to u32.
4344 */
4345 addr = (u32) sg_dma_address(sg);
4346 sg_len = sg_dma_len(sg);
4347
4348 while (sg_len) {
4349 offset = addr & 0xffff;
4350 len = sg_len;
4351 if ((offset + sg_len) > 0x10000)
4352 len = 0x10000 - offset;
4353
4354 blen = len & 0xffff;
4355 ap->prd[idx].addr = cpu_to_le32(addr);
4356 if (blen == 0) {
4357 /* Some PATA chipsets like the CS5530 can't
4358 cope with 0x0000 meaning 64K as the spec says */
4359 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4360 blen = 0x8000;
4361 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4362 }
4363 ap->prd[idx].flags_len = cpu_to_le32(blen);
4364 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4365
4366 idx++;
4367 sg_len -= len;
4368 addr += len;
4369 }
4370 }
4371
4372 if (idx)
4373 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4374}
4375
1da177e4
LT
4376/**
4377 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4378 * @qc: Metadata associated with taskfile to check
4379 *
780a87f7
JG
4380 * Allow low-level driver to filter ATA PACKET commands, returning
4381 * a status indicating whether or not it is OK to use DMA for the
4382 * supplied PACKET command.
4383 *
1da177e4 4384 * LOCKING:
cca3974e 4385 * spin_lock_irqsave(host lock)
0cba632b 4386 *
1da177e4
LT
4387 * RETURNS: 0 when ATAPI DMA can be used
4388 * nonzero otherwise
4389 */
4390int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4391{
4392 struct ata_port *ap = qc->ap;
b9a4197e
TH
4393
4394 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4395 * few ATAPI devices choke on such DMA requests.
4396 */
4397 if (unlikely(qc->nbytes & 15))
4398 return 1;
6f23a31d 4399
1da177e4 4400 if (ap->ops->check_atapi_dma)
b9a4197e 4401 return ap->ops->check_atapi_dma(qc);
1da177e4 4402
b9a4197e 4403 return 0;
1da177e4 4404}
b9a4197e 4405
31cc23b3
TH
4406/**
4407 * ata_std_qc_defer - Check whether a qc needs to be deferred
4408 * @qc: ATA command in question
4409 *
4410 * Non-NCQ commands cannot run with any other command, NCQ or
4411 * not. As upper layer only knows the queue depth, we are
4412 * responsible for maintaining exclusion. This function checks
4413 * whether a new command @qc can be issued.
4414 *
4415 * LOCKING:
4416 * spin_lock_irqsave(host lock)
4417 *
4418 * RETURNS:
4419 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4420 */
4421int ata_std_qc_defer(struct ata_queued_cmd *qc)
4422{
4423 struct ata_link *link = qc->dev->link;
4424
4425 if (qc->tf.protocol == ATA_PROT_NCQ) {
4426 if (!ata_tag_valid(link->active_tag))
4427 return 0;
4428 } else {
4429 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4430 return 0;
4431 }
4432
4433 return ATA_DEFER_LINK;
4434}
4435
1da177e4
LT
4436/**
4437 * ata_qc_prep - Prepare taskfile for submission
4438 * @qc: Metadata associated with taskfile to be prepared
4439 *
780a87f7
JG
4440 * Prepare ATA taskfile for submission.
4441 *
1da177e4 4442 * LOCKING:
cca3974e 4443 * spin_lock_irqsave(host lock)
1da177e4
LT
4444 */
4445void ata_qc_prep(struct ata_queued_cmd *qc)
4446{
4447 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4448 return;
4449
4450 ata_fill_sg(qc);
4451}
4452
d26fc955
AC
4453/**
4454 * ata_dumb_qc_prep - Prepare taskfile for submission
4455 * @qc: Metadata associated with taskfile to be prepared
4456 *
4457 * Prepare ATA taskfile for submission.
4458 *
4459 * LOCKING:
4460 * spin_lock_irqsave(host lock)
4461 */
4462void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4463{
4464 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4465 return;
4466
4467 ata_fill_sg_dumb(qc);
4468}
4469
e46834cd
BK
/* No-op qc_prep for controllers that need no PRD setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4471
0cba632b
JG
4472/**
4473 * ata_sg_init_one - Associate command with memory buffer
4474 * @qc: Command to be associated
4475 * @buf: Memory buffer
4476 * @buflen: Length of memory buffer, in bytes.
4477 *
4478 * Initialize the data-related elements of queued_cmd @qc
4479 * to point to a single memory buffer, @buf of byte length @buflen.
4480 *
4481 * LOCKING:
cca3974e 4482 * spin_lock_irqsave(host lock)
0cba632b
JG
4483 */
4484
1da177e4
LT
4485void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4486{
1da177e4
LT
4487 qc->flags |= ATA_QCFLAG_SINGLE;
4488
cedc9a47 4489 qc->__sg = &qc->sgent;
1da177e4 4490 qc->n_elem = 1;
cedc9a47 4491 qc->orig_n_elem = 1;
1da177e4 4492 qc->buf_virt = buf;
233277ca 4493 qc->nbytes = buflen;
1da177e4 4494
61c0596c 4495 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4496}
4497
0cba632b
JG
4498/**
4499 * ata_sg_init - Associate command with scatter-gather table.
4500 * @qc: Command to be associated
4501 * @sg: Scatter-gather table.
4502 * @n_elem: Number of elements in s/g table.
4503 *
4504 * Initialize the data-related elements of queued_cmd @qc
4505 * to point to a scatter-gather table @sg, containing @n_elem
4506 * elements.
4507 *
4508 * LOCKING:
cca3974e 4509 * spin_lock_irqsave(host lock)
0cba632b
JG
4510 */
4511
1da177e4
LT
4512void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4513 unsigned int n_elem)
4514{
4515 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4516 qc->__sg = sg;
1da177e4 4517 qc->n_elem = n_elem;
cedc9a47 4518 qc->orig_n_elem = n_elem;
1da177e4
LT
4519}
4520
4521/**
0cba632b
JG
4522 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4523 * @qc: Command with memory buffer to be mapped.
4524 *
4525 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4526 *
4527 * LOCKING:
cca3974e 4528 * spin_lock_irqsave(host lock)
1da177e4
LT
4529 *
4530 * RETURNS:
0cba632b 4531 * Zero on success, negative on error.
1da177e4
LT
4532 */
4533
4534static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4535{
4536 struct ata_port *ap = qc->ap;
4537 int dir = qc->dma_dir;
cedc9a47 4538 struct scatterlist *sg = qc->__sg;
1da177e4 4539 dma_addr_t dma_address;
2e242fa9 4540 int trim_sg = 0;
1da177e4 4541
cedc9a47
JG
4542 /* we must lengthen transfers to end on a 32-bit boundary */
4543 qc->pad_len = sg->length & 3;
4544 if (qc->pad_len) {
4545 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4546 struct scatterlist *psg = &qc->pad_sgent;
4547
a4631474 4548 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4549
4550 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4551
4552 if (qc->tf.flags & ATA_TFLAG_WRITE)
4553 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4554 qc->pad_len);
4555
4556 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4557 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4558 /* trim sg */
4559 sg->length -= qc->pad_len;
2e242fa9
TH
4560 if (sg->length == 0)
4561 trim_sg = 1;
cedc9a47
JG
4562
4563 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4564 sg->length, qc->pad_len);
4565 }
4566
2e242fa9
TH
4567 if (trim_sg) {
4568 qc->n_elem--;
e1410f2d
JG
4569 goto skip_map;
4570 }
4571
2f1f610b 4572 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4573 sg->length, dir);
537a95d9
TH
4574 if (dma_mapping_error(dma_address)) {
4575 /* restore sg */
4576 sg->length += qc->pad_len;
1da177e4 4577 return -1;
537a95d9 4578 }
1da177e4
LT
4579
4580 sg_dma_address(sg) = dma_address;
32529e01 4581 sg_dma_len(sg) = sg->length;
1da177e4 4582
2e242fa9 4583skip_map:
1da177e4
LT
4584 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4585 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4586
4587 return 0;
4588}
4589
4590/**
0cba632b
JG
4591 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4592 * @qc: Command with scatter-gather table to be mapped.
4593 *
4594 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4595 *
4596 * LOCKING:
cca3974e 4597 * spin_lock_irqsave(host lock)
1da177e4
LT
4598 *
4599 * RETURNS:
0cba632b 4600 * Zero on success, negative on error.
1da177e4
LT
4601 *
4602 */
4603
4604static int ata_sg_setup(struct ata_queued_cmd *qc)
4605{
4606 struct ata_port *ap = qc->ap;
cedc9a47
JG
4607 struct scatterlist *sg = qc->__sg;
4608 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4609 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4610
44877b4e 4611 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4612 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4613
cedc9a47
JG
4614 /* we must lengthen transfers to end on a 32-bit boundary */
4615 qc->pad_len = lsg->length & 3;
4616 if (qc->pad_len) {
4617 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4618 struct scatterlist *psg = &qc->pad_sgent;
4619 unsigned int offset;
4620
a4631474 4621 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4622
4623 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4624
4625 /*
4626 * psg->page/offset are used to copy to-be-written
4627 * data in this function or read data in ata_sg_clean.
4628 */
4629 offset = lsg->offset + lsg->length - qc->pad_len;
4630 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4631 psg->offset = offset_in_page(offset);
4632
4633 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4634 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4635 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4636 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4637 }
4638
4639 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4640 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4641 /* trim last sg */
4642 lsg->length -= qc->pad_len;
e1410f2d
JG
4643 if (lsg->length == 0)
4644 trim_sg = 1;
cedc9a47
JG
4645
4646 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4647 qc->n_elem - 1, lsg->length, qc->pad_len);
4648 }
4649
e1410f2d
JG
4650 pre_n_elem = qc->n_elem;
4651 if (trim_sg && pre_n_elem)
4652 pre_n_elem--;
4653
4654 if (!pre_n_elem) {
4655 n_elem = 0;
4656 goto skip_map;
4657 }
4658
1da177e4 4659 dir = qc->dma_dir;
2f1f610b 4660 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4661 if (n_elem < 1) {
4662 /* restore last sg */
4663 lsg->length += qc->pad_len;
1da177e4 4664 return -1;
537a95d9 4665 }
1da177e4
LT
4666
4667 DPRINTK("%d sg elements mapped\n", n_elem);
4668
e1410f2d 4669skip_map:
1da177e4
LT
4670 qc->n_elem = n_elem;
4671
4672 return 0;
4673}
4674
0baab86b 4675/**
c893a3ae 4676 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4677 * @buf: Buffer to swap
4678 * @buf_words: Number of 16-bit words in buffer.
4679 *
4680 * Swap halves of 16-bit words if needed to convert from
4681 * little-endian byte order to native cpu byte order, or
4682 * vice-versa.
4683 *
4684 * LOCKING:
6f0ef4fa 4685 * Inherited from caller.
0baab86b 4686 */
1da177e4
LT
4687void swap_buf_le16(u16 *buf, unsigned int buf_words)
4688{
4689#ifdef __BIG_ENDIAN
4690 unsigned int i;
4691
4692 for (i = 0; i < buf_words; i++)
4693 buf[i] = le16_to_cpu(buf[i]);
4694#endif /* __BIG_ENDIAN */
4695}
4696
6ae4cfb5 4697/**
0d5ff566 4698 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4699 * @adev: device to target
6ae4cfb5
AL
4700 * @buf: data buffer
4701 * @buflen: buffer length
344babaa 4702 * @write_data: read/write
6ae4cfb5
AL
4703 *
4704 * Transfer data from/to the device data register by PIO.
4705 *
4706 * LOCKING:
4707 * Inherited from caller.
6ae4cfb5 4708 */
0d5ff566
TH
4709void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4710 unsigned int buflen, int write_data)
1da177e4 4711{
9af5c9c9 4712 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4713 unsigned int words = buflen >> 1;
1da177e4 4714
6ae4cfb5 4715 /* Transfer multiple of 2 bytes */
1da177e4 4716 if (write_data)
0d5ff566 4717 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4718 else
0d5ff566 4719 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4720
4721 /* Transfer trailing 1 byte, if any. */
4722 if (unlikely(buflen & 0x01)) {
4723 u16 align_buf[1] = { 0 };
4724 unsigned char *trailing_buf = buf + buflen - 1;
4725
4726 if (write_data) {
4727 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4728 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4729 } else {
0d5ff566 4730 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4731 memcpy(trailing_buf, align_buf, 1);
4732 }
4733 }
1da177e4
LT
4734}
4735
75e99585 4736/**
0d5ff566 4737 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4738 * @adev: device to target
4739 * @buf: data buffer
4740 * @buflen: buffer length
4741 * @write_data: read/write
4742 *
88574551 4743 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4744 * transfer with interrupts disabled.
4745 *
4746 * LOCKING:
4747 * Inherited from caller.
4748 */
0d5ff566
TH
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	/* same as ata_data_xfer(), but with local interrupts disabled
	 * for the duration of the transfer
	 */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4757
4758
6ae4cfb5 4759/**
5a5dbd18 4760 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4761 * @qc: Command on going
4762 *
5a5dbd18 4763 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4764 *
4765 * LOCKING:
4766 * Inherited from caller.
4767 */
4768
1da177e4
LT
4769static void ata_pio_sector(struct ata_queued_cmd *qc)
4770{
4771 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4772 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4773 struct ata_port *ap = qc->ap;
4774 struct page *page;
4775 unsigned int offset;
4776 unsigned char *buf;
4777
5a5dbd18 4778 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4779 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4780
4781 page = sg[qc->cursg].page;
726f0785 4782 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4783
4784 /* get the current page and offset */
4785 page = nth_page(page, (offset >> PAGE_SHIFT));
4786 offset %= PAGE_SIZE;
4787
1da177e4
LT
4788 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4789
91b8b313
AL
4790 if (PageHighMem(page)) {
4791 unsigned long flags;
4792
a6b2c5d4 4793 /* FIXME: use a bounce buffer */
91b8b313
AL
4794 local_irq_save(flags);
4795 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4796
91b8b313 4797 /* do the actual data transfer */
5a5dbd18 4798 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4799
91b8b313
AL
4800 kunmap_atomic(buf, KM_IRQ0);
4801 local_irq_restore(flags);
4802 } else {
4803 buf = page_address(page);
5a5dbd18 4804 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4805 }
1da177e4 4806
5a5dbd18
ML
4807 qc->curbytes += qc->sect_size;
4808 qc->cursg_ofs += qc->sect_size;
1da177e4 4809
726f0785 4810 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4811 qc->cursg++;
4812 qc->cursg_ofs = 0;
4813 }
1da177e4 4814}
1da177e4 4815
07f6f7d0 4816/**
5a5dbd18 4817 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4818 * @qc: Command on going
4819 *
5a5dbd18 4820 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4821 * ATA device for the DRQ request.
4822 *
4823 * LOCKING:
4824 * Inherited from caller.
4825 */
1da177e4 4826
07f6f7d0
AL
4827static void ata_pio_sectors(struct ata_queued_cmd *qc)
4828{
4829 if (is_multi_taskfile(&qc->tf)) {
4830 /* READ/WRITE MULTIPLE */
4831 unsigned int nsect;
4832
587005de 4833 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4834
5a5dbd18 4835 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4836 qc->dev->multi_count);
07f6f7d0
AL
4837 while (nsect--)
4838 ata_pio_sector(qc);
4839 } else
4840 ata_pio_sector(qc);
4cc980b3
AL
4841
4842 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4843}
4844
c71c1857
AL
4845/**
4846 * atapi_send_cdb - Write CDB bytes to hardware
4847 * @ap: Port to which ATAPI device is attached.
4848 * @qc: Taskfile currently active
4849 *
4850 * When device has indicated its readiness to accept
4851 * a CDB, this function is called. Send the CDB.
4852 *
4853 * LOCKING:
4854 * caller.
4855 */
4856
4857static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4858{
4859 /* send SCSI cdb */
4860 DPRINTK("send cdb\n");
db024d53 4861 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4862
a6b2c5d4 4863 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4864 ata_altstatus(ap); /* flush */
4865
4866 switch (qc->tf.protocol) {
4867 case ATA_PROT_ATAPI:
4868 ap->hsm_task_state = HSM_ST;
4869 break;
4870 case ATA_PROT_ATAPI_NODATA:
4871 ap->hsm_task_state = HSM_ST_LAST;
4872 break;
4873 case ATA_PROT_ATAPI_DMA:
4874 ap->hsm_task_state = HSM_ST_LAST;
4875 /* initiate bmdma */
4876 ap->ops->bmdma_start(qc);
4877 break;
4878 }
1da177e4
LT
4879}
4880
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatterlist one page-bounded chunk at a time.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* if this chunk satisfies the whole command, this is the last
	 * data block
	 */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one word at a time through the bounce word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance transfer bookkeeping */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* move to the next sg entry once the current one is consumed */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
4975
6ae4cfb5
AL
4976/**
4977 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4978 * @qc: Command on going
4979 *
4980 * Transfer Transfer data from/to the ATAPI device.
4981 *
4982 * LOCKING:
4983 * Inherited from caller.
6ae4cfb5
AL
4984 */
4985
1da177e4
LT
4986static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4987{
4988 struct ata_port *ap = qc->ap;
4989 struct ata_device *dev = qc->dev;
4990 unsigned int ireason, bc_lo, bc_hi, bytes;
4991 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4992
eec4c3f3
AL
4993 /* Abuse qc->result_tf for temp storage of intermediate TF
4994 * here to save some kernel stack usage.
4995 * For normal completion, qc->result_tf is not relevant. For
4996 * error, qc->result_tf is later overwritten by ata_qc_complete().
4997 * So, the correctness of qc->result_tf is not affected.
4998 */
4999 ap->ops->tf_read(ap, &qc->result_tf);
5000 ireason = qc->result_tf.nsect;
5001 bc_lo = qc->result_tf.lbam;
5002 bc_hi = qc->result_tf.lbah;
1da177e4
LT
5003 bytes = (bc_hi << 8) | bc_lo;
5004
5005 /* shall be cleared to zero, indicating xfer of data */
5006 if (ireason & (1 << 0))
5007 goto err_out;
5008
5009 /* make sure transfer direction matches expected */
5010 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5011 if (do_write != i_write)
5012 goto err_out;
5013
44877b4e 5014 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 5015
1da177e4 5016 __atapi_pio_bytes(qc, bytes);
4cc980b3 5017 ata_altstatus(ap); /* flush */
1da177e4
LT
5018
5019 return;
5020
5021err_out:
f15a1daf 5022 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 5023 qc->err_mask |= AC_ERR_HSM;
14be71f4 5024 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
5025}
5026
5027/**
c234fb00
AL
5028 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5029 * @ap: the target ata_port
5030 * @qc: qc on going
1da177e4 5031 *
c234fb00
AL
5032 * RETURNS:
5033 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5034 */
c234fb00
AL
5035
5036static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5037{
c234fb00
AL
5038 if (qc->tf.flags & ATA_TFLAG_POLLING)
5039 return 1;
1da177e4 5040
c234fb00
AL
5041 if (ap->hsm_task_state == HSM_ST_FIRST) {
5042 if (qc->tf.protocol == ATA_PROT_PIO &&
5043 (qc->tf.flags & ATA_TFLAG_WRITE))
5044 return 1;
1da177e4 5045
c234fb00
AL
5046 if (is_atapi_taskfile(&qc->tf) &&
5047 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5048 return 1;
fe79e683
AL
5049 }
5050
c234fb00
AL
5051 return 0;
5052}
1da177e4 5053
c17ea20d
TH
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH is available */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable port interrupts before
					 * completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation: freeze the port
					 * and let EH recover */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: no port freezing; just complete the qc */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5103
bb5cb290
AL
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine forward one or more
 *	steps based on the device @status read by the caller.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					/* drain one block of junk data */
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5337
65f27f38 5338static void ata_pio_task(struct work_struct *work)
8061f5f0 5339{
65f27f38
DH
5340 struct ata_port *ap =
5341 container_of(work, struct ata_port, port_task.work);
5342 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5343 u8 status;
a1af3734 5344 int poll_next;
8061f5f0 5345
7fb6ec28 5346fsm_start:
a1af3734 5347 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5348
a1af3734
AL
5349 /*
5350 * This is purely heuristic. This is a fast path.
5351 * Sometimes when we enter, BSY will be cleared in
5352 * a chk-status or two. If not, the drive is probably seeking
5353 * or something. Snooze for a couple msecs, then
5354 * chk-status again. If still busy, queue delayed work.
5355 */
5356 status = ata_busy_wait(ap, ATA_BUSY, 5);
5357 if (status & ATA_BUSY) {
5358 msleep(2);
5359 status = ata_busy_wait(ap, ATA_BUSY, 10);
5360 if (status & ATA_BUSY) {
31ce6dae 5361 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5362 return;
5363 }
8061f5f0
TH
5364 }
5365
a1af3734
AL
5366 /* move the HSM */
5367 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5368
a1af3734
AL
5369 /* another command or interrupt handler
5370 * may be running at this point.
5371 */
5372 if (poll_next)
7fb6ec28 5373 goto fsm_start;
8061f5f0
TH
5374}
5375
1da177e4
LT
5376/**
5377 * ata_qc_new - Request an available ATA command, for queueing
5378 * @ap: Port associated with device @dev
5379 * @dev: Device from whom we request an available command structure
5380 *
5381 * LOCKING:
0cba632b 5382 * None.
1da177e4
LT
5383 */
5384
5385static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5386{
5387 struct ata_queued_cmd *qc = NULL;
5388 unsigned int i;
5389
e3180499 5390 /* no command while frozen */
b51e9e5d 5391 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5392 return NULL;
5393
2ab7db1f
TH
5394 /* the last tag is reserved for internal command. */
5395 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5396 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5397 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5398 break;
5399 }
5400
5401 if (qc)
5402 qc->tag = i;
5403
5404 return qc;
5405}
5406
5407/**
5408 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5409 * @dev: Device from whom we request an available command structure
5410 *
5411 * LOCKING:
0cba632b 5412 * None.
1da177e4
LT
5413 */
5414
3373efd8 5415struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5416{
9af5c9c9 5417 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5418 struct ata_queued_cmd *qc;
5419
5420 qc = ata_qc_new(ap);
5421 if (qc) {
1da177e4
LT
5422 qc->scsicmd = NULL;
5423 qc->ap = ap;
5424 qc->dev = dev;
1da177e4 5425
2c13b7ce 5426 ata_qc_reinit(qc);
1da177e4
LT
5427 }
5428
5429 return qc;
5430}
5431
1da177e4
LT
5432/**
5433 * ata_qc_free - free unused ata_queued_cmd
5434 * @qc: Command to complete
5435 *
5436 * Designed to free unused ata_queued_cmd object
5437 * in case something prevents using it.
5438 *
5439 * LOCKING:
cca3974e 5440 * spin_lock_irqsave(host lock)
1da177e4
LT
5441 */
5442void ata_qc_free(struct ata_queued_cmd *qc)
5443{
4ba946e9
TH
5444 struct ata_port *ap = qc->ap;
5445 unsigned int tag;
5446
a4631474 5447 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5448
4ba946e9
TH
5449 qc->flags = 0;
5450 tag = qc->tag;
5451 if (likely(ata_tag_valid(tag))) {
4ba946e9 5452 qc->tag = ATA_TAG_POISON;
6cec4a39 5453 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5454 }
1da177e4
LT
5455}
5456
/**
 *	__ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Tear down DMA mappings, clear the active-tag bookkeeping on
 *	@qc's link and port, then invoke @qc's completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: clear this tag's bit; the link goes idle only
		 * when no queued tags remain */
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		/* non-NCQ: one active tag per link */
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5493
39599a53
TH
5494static void fill_result_tf(struct ata_queued_cmd *qc)
5495{
5496 struct ata_port *ap = qc->ap;
5497
39599a53 5498 qc->result_tf.flags = qc->tf.flags;
4742d54f 5499 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5500}
5501
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new-style EH */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are reported to their issuer
			 * directly; everything else goes through EH */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old-style EH */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5561
dedaf2b0
TH
5562/**
5563 * ata_qc_complete_multiple - Complete multiple qcs successfully
5564 * @ap: port in question
5565 * @qc_active: new qc_active mask
5566 * @finish_qc: LLDD callback invoked before completing a qc
5567 *
5568 * Complete in-flight commands. This functions is meant to be
5569 * called from low-level driver's interrupt routine to complete
5570 * requests normally. ap->qc_active and @qc_active is compared
5571 * and commands are completed accordingly.
5572 *
5573 * LOCKING:
cca3974e 5574 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5575 *
5576 * RETURNS:
5577 * Number of completed commands on success, -errno otherwise.
5578 */
5579int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5580 void (*finish_qc)(struct ata_queued_cmd *))
5581{
5582 int nr_done = 0;
5583 u32 done_mask;
5584 int i;
5585
5586 done_mask = ap->qc_active ^ qc_active;
5587
5588 if (unlikely(done_mask & qc_active)) {
5589 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5590 "(%08x->%08x)\n", ap->qc_active, qc_active);
5591 return -EINVAL;
5592 }
5593
5594 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5595 struct ata_queued_cmd *qc;
5596
5597 if (!(done_mask & (1 << i)))
5598 continue;
5599
5600 if ((qc = ata_qc_from_tag(ap, i))) {
5601 if (finish_qc)
5602 finish_qc(qc);
5603 ata_qc_complete(qc);
5604 nr_done++;
5605 }
5606 }
5607
5608 return nr_done;
5609}
5610
1da177e4
LT
5611static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5612{
5613 struct ata_port *ap = qc->ap;
5614
5615 switch (qc->tf.protocol) {
3dc1d881 5616 case ATA_PROT_NCQ:
1da177e4
LT
5617 case ATA_PROT_DMA:
5618 case ATA_PROT_ATAPI_DMA:
5619 return 1;
5620
5621 case ATA_PROT_ATAPI:
5622 case ATA_PROT_PIO:
1da177e4
LT
5623 if (ap->flags & ATA_FLAG_PIO_DMA)
5624 return 1;
5625
5626 /* fall through */
5627
5628 default:
5629 return 0;
5630 }
5631
5632 /* never reached */
5633}
5634
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* mark the tag active on the link before touching hardware */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map data buffers for DMA if the protocol needs it */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed: no mapping exists to clean up */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5699
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command. ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt; bmdma is
		 * started by atapi_send_cdb() after the CDB goes out
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5831
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command. Currently,
 *	only DMA interrupts are handled. All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH when a DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5933
5934/**
5935 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5936 * @irq: irq line (unused)
cca3974e 5937 * @dev_instance: pointer to our ata_host information structure
1da177e4 5938 *
0cba632b
JG
5939 * Default interrupt handler for PCI IDE devices. Calls
5940 * ata_host_intr() for each port that is not disabled.
5941 *
1da177e4 5942 * LOCKING:
cca3974e 5943 * Obtains host lock during operation.
1da177e4
LT
5944 *
5945 * RETURNS:
0cba632b 5946 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5947 */
5948
7d12e780 5949irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5950{
cca3974e 5951 struct ata_host *host = dev_instance;
1da177e4
LT
5952 unsigned int i;
5953 unsigned int handled = 0;
5954 unsigned long flags;
5955
5956 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5957 spin_lock_irqsave(&host->lock, flags);
1da177e4 5958
cca3974e 5959 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5960 struct ata_port *ap;
5961
cca3974e 5962 ap = host->ports[i];
c1389503 5963 if (ap &&
029f5468 5964 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5965 struct ata_queued_cmd *qc;
5966
9af5c9c9 5967 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5968 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5969 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5970 handled |= ata_host_intr(ap, qc);
5971 }
5972 }
5973
cca3974e 5974 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5975
5976 return IRQ_RETVAL(handled);
5977}
5978
34bf2170
TH
5979/**
5980 * sata_scr_valid - test whether SCRs are accessible
936fd732 5981 * @link: ATA link to test SCR accessibility for
34bf2170 5982 *
936fd732 5983 * Test whether SCRs are accessible for @link.
34bf2170
TH
5984 *
5985 * LOCKING:
5986 * None.
5987 *
5988 * RETURNS:
5989 * 1 if SCRs are accessible, 0 otherwise.
5990 */
936fd732 5991int sata_scr_valid(struct ata_link *link)
34bf2170 5992{
936fd732
TH
5993 struct ata_port *ap = link->ap;
5994
a16abc0b 5995 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5996}
5997
5998/**
5999 * sata_scr_read - read SCR register of the specified port
936fd732 6000 * @link: ATA link to read SCR for
34bf2170
TH
6001 * @reg: SCR to read
6002 * @val: Place to store read value
6003 *
936fd732 6004 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6005 * guaranteed to succeed if @link is ap->link, the cable type of
6006 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6007 *
6008 * LOCKING:
633273a3 6009 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6010 *
6011 * RETURNS:
6012 * 0 on success, negative errno on failure.
6013 */
936fd732 6014int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6015{
633273a3
TH
6016 if (ata_is_host_link(link)) {
6017 struct ata_port *ap = link->ap;
936fd732 6018
633273a3
TH
6019 if (sata_scr_valid(link))
6020 return ap->ops->scr_read(ap, reg, val);
6021 return -EOPNOTSUPP;
6022 }
6023
6024 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6025}
6026
6027/**
6028 * sata_scr_write - write SCR register of the specified port
936fd732 6029 * @link: ATA link to write SCR for
34bf2170
TH
6030 * @reg: SCR to write
6031 * @val: value to write
6032 *
936fd732 6033 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6034 * guaranteed to succeed if @link is ap->link, the cable type of
6035 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6036 *
6037 * LOCKING:
633273a3 6038 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6039 *
6040 * RETURNS:
6041 * 0 on success, negative errno on failure.
6042 */
936fd732 6043int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6044{
633273a3
TH
6045 if (ata_is_host_link(link)) {
6046 struct ata_port *ap = link->ap;
6047
6048 if (sata_scr_valid(link))
6049 return ap->ops->scr_write(ap, reg, val);
6050 return -EOPNOTSUPP;
6051 }
936fd732 6052
633273a3 6053 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6054}
6055
6056/**
6057 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6058 * @link: ATA link to write SCR for
34bf2170
TH
6059 * @reg: SCR to write
6060 * @val: value to write
6061 *
6062 * This function is identical to sata_scr_write() except that this
6063 * function performs flush after writing to the register.
6064 *
6065 * LOCKING:
633273a3 6066 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6067 *
6068 * RETURNS:
6069 * 0 on success, negative errno on failure.
6070 */
936fd732 6071int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6072{
633273a3
TH
6073 if (ata_is_host_link(link)) {
6074 struct ata_port *ap = link->ap;
6075 int rc;
da3dbb17 6076
633273a3
TH
6077 if (sata_scr_valid(link)) {
6078 rc = ap->ops->scr_write(ap, reg, val);
6079 if (rc == 0)
6080 rc = ap->ops->scr_read(ap, reg, &val);
6081 return rc;
6082 }
6083 return -EOPNOTSUPP;
34bf2170 6084 }
633273a3
TH
6085
6086 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6087}
6088
6089/**
936fd732
TH
6090 * ata_link_online - test whether the given link is online
6091 * @link: ATA link to test
34bf2170 6092 *
936fd732
TH
6093 * Test whether @link is online. Note that this function returns
6094 * 0 if online status of @link cannot be obtained, so
6095 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6096 *
6097 * LOCKING:
6098 * None.
6099 *
6100 * RETURNS:
6101 * 1 if the port online status is available and online.
6102 */
936fd732 6103int ata_link_online(struct ata_link *link)
34bf2170
TH
6104{
6105 u32 sstatus;
6106
936fd732
TH
6107 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6108 (sstatus & 0xf) == 0x3)
34bf2170
TH
6109 return 1;
6110 return 0;
6111}
6112
6113/**
936fd732
TH
6114 * ata_link_offline - test whether the given link is offline
6115 * @link: ATA link to test
34bf2170 6116 *
936fd732
TH
6117 * Test whether @link is offline. Note that this function
6118 * returns 0 if offline status of @link cannot be obtained, so
6119 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6120 *
6121 * LOCKING:
6122 * None.
6123 *
6124 * RETURNS:
6125 * 1 if the port offline status is available and offline.
6126 */
936fd732 6127int ata_link_offline(struct ata_link *link)
34bf2170
TH
6128{
6129 u32 sstatus;
6130
936fd732
TH
6131 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6132 (sstatus & 0xf) != 0x3)
34bf2170
TH
6133 return 1;
6134 return 0;
6135}
0baab86b 6136
77b08fb5 6137int ata_flush_cache(struct ata_device *dev)
9b847548 6138{
977e6b9f 6139 unsigned int err_mask;
9b847548
JA
6140 u8 cmd;
6141
6142 if (!ata_try_flush_cache(dev))
6143 return 0;
6144
6fc49adb 6145 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6146 cmd = ATA_CMD_FLUSH_EXT;
6147 else
6148 cmd = ATA_CMD_FLUSH;
6149
4f34337b
AC
6150 /* This is wrong. On a failed flush we get back the LBA of the lost
6151 sector and we should (assuming it wasn't aborted as unknown) issue
6152 a further flush command to continue the writeback until it
6153 does not error */
977e6b9f
TH
6154 err_mask = ata_do_simple_cmd(dev, cmd);
6155 if (err_mask) {
6156 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6157 return -EIO;
6158 }
6159
6160 return 0;
9b847548
JA
6161}
6162
6ffa01d8 6163#ifdef CONFIG_PM
cca3974e
JG
6164static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6165 unsigned int action, unsigned int ehi_flags,
6166 int wait)
500530f6
TH
6167{
6168 unsigned long flags;
6169 int i, rc;
6170
cca3974e
JG
6171 for (i = 0; i < host->n_ports; i++) {
6172 struct ata_port *ap = host->ports[i];
e3667ebf 6173 struct ata_link *link;
500530f6
TH
6174
6175 /* Previous resume operation might still be in
6176 * progress. Wait for PM_PENDING to clear.
6177 */
6178 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6179 ata_port_wait_eh(ap);
6180 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6181 }
6182
6183 /* request PM ops to EH */
6184 spin_lock_irqsave(ap->lock, flags);
6185
6186 ap->pm_mesg = mesg;
6187 if (wait) {
6188 rc = 0;
6189 ap->pm_result = &rc;
6190 }
6191
6192 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6193 __ata_port_for_each_link(link, ap) {
6194 link->eh_info.action |= action;
6195 link->eh_info.flags |= ehi_flags;
6196 }
500530f6
TH
6197
6198 ata_port_schedule_eh(ap);
6199
6200 spin_unlock_irqrestore(ap->lock, flags);
6201
6202 /* wait and check result */
6203 if (wait) {
6204 ata_port_wait_eh(ap);
6205 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6206 if (rc)
6207 return rc;
6208 }
6209 }
6210
6211 return 0;
6212}
6213
6214/**
cca3974e
JG
6215 * ata_host_suspend - suspend host
6216 * @host: host to suspend
500530f6
TH
6217 * @mesg: PM message
6218 *
cca3974e 6219 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6220 * function requests EH to perform PM operations and waits for EH
6221 * to finish.
6222 *
6223 * LOCKING:
6224 * Kernel thread context (may sleep).
6225 *
6226 * RETURNS:
6227 * 0 on success, -errno on failure.
6228 */
cca3974e 6229int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6230{
9666f400 6231 int rc;
500530f6 6232
cca3974e 6233 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6234 if (rc == 0)
6235 host->dev->power.power_state = mesg;
500530f6
TH
6236 return rc;
6237}
6238
6239/**
cca3974e
JG
6240 * ata_host_resume - resume host
6241 * @host: host to resume
500530f6 6242 *
cca3974e 6243 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6244 * function requests EH to perform PM operations and returns.
6245 * Note that all resume operations are performed parallely.
6246 *
6247 * LOCKING:
6248 * Kernel thread context (may sleep).
6249 */
cca3974e 6250void ata_host_resume(struct ata_host *host)
500530f6 6251{
cca3974e
JG
6252 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6253 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6254 host->dev->power.power_state = PMSG_ON;
500530f6 6255}
6ffa01d8 6256#endif
500530f6 6257
c893a3ae
RD
6258/**
6259 * ata_port_start - Set port up for dma.
6260 * @ap: Port to initialize
6261 *
6262 * Called just after data structures for each port are
6263 * initialized. Allocates space for PRD table.
6264 *
6265 * May be used as the port_start() entry in ata_port_operations.
6266 *
6267 * LOCKING:
6268 * Inherited from caller.
6269 */
f0d36efd 6270int ata_port_start(struct ata_port *ap)
1da177e4 6271{
2f1f610b 6272 struct device *dev = ap->dev;
6037d6bb 6273 int rc;
1da177e4 6274
f0d36efd
TH
6275 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6276 GFP_KERNEL);
1da177e4
LT
6277 if (!ap->prd)
6278 return -ENOMEM;
6279
6037d6bb 6280 rc = ata_pad_alloc(ap, dev);
f0d36efd 6281 if (rc)
6037d6bb 6282 return rc;
1da177e4 6283
f0d36efd
TH
6284 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6285 (unsigned long long)ap->prd_dma);
1da177e4
LT
6286 return 0;
6287}
6288
3ef3b43d
TH
6289/**
6290 * ata_dev_init - Initialize an ata_device structure
6291 * @dev: Device structure to initialize
6292 *
6293 * Initialize @dev in preparation for probing.
6294 *
6295 * LOCKING:
6296 * Inherited from caller.
6297 */
6298void ata_dev_init(struct ata_device *dev)
6299{
9af5c9c9
TH
6300 struct ata_link *link = dev->link;
6301 struct ata_port *ap = link->ap;
72fa4b74
TH
6302 unsigned long flags;
6303
5a04bf4b 6304 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6305 link->sata_spd_limit = link->hw_sata_spd_limit;
6306 link->sata_spd = 0;
5a04bf4b 6307
72fa4b74
TH
6308 /* High bits of dev->flags are used to record warm plug
6309 * requests which occur asynchronously. Synchronize using
cca3974e 6310 * host lock.
72fa4b74 6311 */
ba6a1308 6312 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6313 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6314 dev->horkage = 0;
ba6a1308 6315 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6316
72fa4b74
TH
6317 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6318 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6319 dev->pio_mask = UINT_MAX;
6320 dev->mwdma_mask = UINT_MAX;
6321 dev->udma_mask = UINT_MAX;
6322}
6323
4fb37a25
TH
6324/**
6325 * ata_link_init - Initialize an ata_link structure
6326 * @ap: ATA port link is attached to
6327 * @link: Link structure to initialize
8989805d 6328 * @pmp: Port multiplier port number
4fb37a25
TH
6329 *
6330 * Initialize @link.
6331 *
6332 * LOCKING:
6333 * Kernel thread context (may sleep)
6334 */
fb7fd614 6335void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6336{
6337 int i;
6338
6339 /* clear everything except for devices */
6340 memset(link, 0, offsetof(struct ata_link, device[0]));
6341
6342 link->ap = ap;
8989805d 6343 link->pmp = pmp;
4fb37a25
TH
6344 link->active_tag = ATA_TAG_POISON;
6345 link->hw_sata_spd_limit = UINT_MAX;
6346
6347 /* can't use iterator, ap isn't initialized yet */
6348 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6349 struct ata_device *dev = &link->device[i];
6350
6351 dev->link = link;
6352 dev->devno = dev - link->device;
6353 ata_dev_init(dev);
6354 }
6355}
6356
6357/**
6358 * sata_link_init_spd - Initialize link->sata_spd_limit
6359 * @link: Link to configure sata_spd_limit for
6360 *
6361 * Initialize @link->[hw_]sata_spd_limit to the currently
6362 * configured value.
6363 *
6364 * LOCKING:
6365 * Kernel thread context (may sleep).
6366 *
6367 * RETURNS:
6368 * 0 on success, -errno on failure.
6369 */
fb7fd614 6370int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6371{
6372 u32 scontrol, spd;
6373 int rc;
6374
6375 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6376 if (rc)
6377 return rc;
6378
6379 spd = (scontrol >> 4) & 0xf;
6380 if (spd)
6381 link->hw_sata_spd_limit &= (1 << spd) - 1;
6382
6383 link->sata_spd_limit = link->hw_sata_spd_limit;
6384
6385 return 0;
6386}
6387
1da177e4 6388/**
f3187195
TH
6389 * ata_port_alloc - allocate and initialize basic ATA port resources
6390 * @host: ATA host this allocated port belongs to
1da177e4 6391 *
f3187195
TH
6392 * Allocate and initialize basic ATA port resources.
6393 *
6394 * RETURNS:
6395 * Allocate ATA port on success, NULL on failure.
0cba632b 6396 *
1da177e4 6397 * LOCKING:
f3187195 6398 * Inherited from calling layer (may sleep).
1da177e4 6399 */
f3187195 6400struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6401{
f3187195 6402 struct ata_port *ap;
1da177e4 6403
f3187195
TH
6404 DPRINTK("ENTER\n");
6405
6406 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6407 if (!ap)
6408 return NULL;
6409
f4d6d004 6410 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6411 ap->lock = &host->lock;
198e0fed 6412 ap->flags = ATA_FLAG_DISABLED;
f3187195 6413 ap->print_id = -1;
1da177e4 6414 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6415 ap->host = host;
f3187195 6416 ap->dev = host->dev;
1da177e4 6417 ap->last_ctl = 0xFF;
bd5d825c
BP
6418
6419#if defined(ATA_VERBOSE_DEBUG)
6420 /* turn on all debugging levels */
6421 ap->msg_enable = 0x00FF;
6422#elif defined(ATA_DEBUG)
6423 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6424#else
0dd4b21f 6425 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6426#endif
1da177e4 6427
65f27f38
DH
6428 INIT_DELAYED_WORK(&ap->port_task, NULL);
6429 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6430 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6431 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6432 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6433 init_timer_deferrable(&ap->fastdrain_timer);
6434 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6435 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6436
838df628 6437 ap->cbl = ATA_CBL_NONE;
838df628 6438
8989805d 6439 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6440
6441#ifdef ATA_IRQ_TRAP
6442 ap->stats.unhandled_irq = 1;
6443 ap->stats.idle_irq = 1;
6444#endif
1da177e4 6445 return ap;
1da177e4
LT
6446}
6447
f0d36efd
TH
6448static void ata_host_release(struct device *gendev, void *res)
6449{
6450 struct ata_host *host = dev_get_drvdata(gendev);
6451 int i;
6452
6453 for (i = 0; i < host->n_ports; i++) {
6454 struct ata_port *ap = host->ports[i];
6455
ecef7253
TH
6456 if (!ap)
6457 continue;
6458
6459 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6460 ap->ops->port_stop(ap);
f0d36efd
TH
6461 }
6462
ecef7253 6463 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6464 host->ops->host_stop(host);
1aa56cca 6465
1aa506e4
TH
6466 for (i = 0; i < host->n_ports; i++) {
6467 struct ata_port *ap = host->ports[i];
6468
4911487a
TH
6469 if (!ap)
6470 continue;
6471
6472 if (ap->scsi_host)
1aa506e4
TH
6473 scsi_host_put(ap->scsi_host);
6474
633273a3 6475 kfree(ap->pmp_link);
4911487a 6476 kfree(ap);
1aa506e4
TH
6477 host->ports[i] = NULL;
6478 }
6479
1aa56cca 6480 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6481}
6482
f3187195
TH
6483/**
6484 * ata_host_alloc - allocate and init basic ATA host resources
6485 * @dev: generic device this host is associated with
6486 * @max_ports: maximum number of ATA ports associated with this host
6487 *
6488 * Allocate and initialize basic ATA host resources. LLD calls
6489 * this function to allocate a host, initializes it fully and
6490 * attaches it using ata_host_register().
6491 *
6492 * @max_ports ports are allocated and host->n_ports is
6493 * initialized to @max_ports. The caller is allowed to decrease
6494 * host->n_ports before calling ata_host_register(). The unused
6495 * ports will be automatically freed on registration.
6496 *
6497 * RETURNS:
6498 * Allocate ATA host on success, NULL on failure.
6499 *
6500 * LOCKING:
6501 * Inherited from calling layer (may sleep).
6502 */
6503struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6504{
6505 struct ata_host *host;
6506 size_t sz;
6507 int i;
6508
6509 DPRINTK("ENTER\n");
6510
6511 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6512 return NULL;
6513
6514 /* alloc a container for our list of ATA ports (buses) */
6515 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6516 /* alloc a container for our list of ATA ports (buses) */
6517 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6518 if (!host)
6519 goto err_out;
6520
6521 devres_add(dev, host);
6522 dev_set_drvdata(dev, host);
6523
6524 spin_lock_init(&host->lock);
6525 host->dev = dev;
6526 host->n_ports = max_ports;
6527
6528 /* allocate ports bound to this host */
6529 for (i = 0; i < max_ports; i++) {
6530 struct ata_port *ap;
6531
6532 ap = ata_port_alloc(host);
6533 if (!ap)
6534 goto err_out;
6535
6536 ap->port_no = i;
6537 host->ports[i] = ap;
6538 }
6539
6540 devres_remove_group(dev, NULL);
6541 return host;
6542
6543 err_out:
6544 devres_release_group(dev, NULL);
6545 return NULL;
6546}
6547
f5cda257
TH
6548/**
6549 * ata_host_alloc_pinfo - alloc host and init with port_info array
6550 * @dev: generic device this host is associated with
6551 * @ppi: array of ATA port_info to initialize host with
6552 * @n_ports: number of ATA ports attached to this host
6553 *
6554 * Allocate ATA host and initialize with info from @ppi. If NULL
6555 * terminated, @ppi may contain fewer entries than @n_ports. The
6556 * last entry will be used for the remaining ports.
6557 *
6558 * RETURNS:
6559 * Allocate ATA host on success, NULL on failure.
6560 *
6561 * LOCKING:
6562 * Inherited from calling layer (may sleep).
6563 */
6564struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6565 const struct ata_port_info * const * ppi,
6566 int n_ports)
6567{
6568 const struct ata_port_info *pi;
6569 struct ata_host *host;
6570 int i, j;
6571
6572 host = ata_host_alloc(dev, n_ports);
6573 if (!host)
6574 return NULL;
6575
6576 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6577 struct ata_port *ap = host->ports[i];
6578
6579 if (ppi[j])
6580 pi = ppi[j++];
6581
6582 ap->pio_mask = pi->pio_mask;
6583 ap->mwdma_mask = pi->mwdma_mask;
6584 ap->udma_mask = pi->udma_mask;
6585 ap->flags |= pi->flags;
0c88758b 6586 ap->link.flags |= pi->link_flags;
f5cda257
TH
6587 ap->ops = pi->port_ops;
6588
6589 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6590 host->ops = pi->port_ops;
6591 if (!host->private_data && pi->private_data)
6592 host->private_data = pi->private_data;
6593 }
6594
6595 return host;
6596}
6597
ecef7253
TH
6598/**
6599 * ata_host_start - start and freeze ports of an ATA host
6600 * @host: ATA host to start ports for
6601 *
6602 * Start and then freeze ports of @host. Started status is
6603 * recorded in host->flags, so this function can be called
6604 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6605 * once. If host->ops isn't initialized yet, its set to the
6606 * first non-dummy port ops.
ecef7253
TH
6607 *
6608 * LOCKING:
6609 * Inherited from calling layer (may sleep).
6610 *
6611 * RETURNS:
6612 * 0 if all ports are started successfully, -errno otherwise.
6613 */
6614int ata_host_start(struct ata_host *host)
6615{
6616 int i, rc;
6617
6618 if (host->flags & ATA_HOST_STARTED)
6619 return 0;
6620
6621 for (i = 0; i < host->n_ports; i++) {
6622 struct ata_port *ap = host->ports[i];
6623
f3187195
TH
6624 if (!host->ops && !ata_port_is_dummy(ap))
6625 host->ops = ap->ops;
6626
ecef7253
TH
6627 if (ap->ops->port_start) {
6628 rc = ap->ops->port_start(ap);
6629 if (rc) {
6630 ata_port_printk(ap, KERN_ERR, "failed to "
6631 "start port (errno=%d)\n", rc);
6632 goto err_out;
6633 }
6634 }
6635
6636 ata_eh_freeze_port(ap);
6637 }
6638
6639 host->flags |= ATA_HOST_STARTED;
6640 return 0;
6641
6642 err_out:
6643 while (--i >= 0) {
6644 struct ata_port *ap = host->ports[i];
6645
6646 if (ap->ops->port_stop)
6647 ap->ops->port_stop(ap);
6648 }
6649 return rc;
6650}
6651
b03732f0 6652/**
cca3974e
JG
6653 * ata_sas_host_init - Initialize a host struct
6654 * @host: host to initialize
6655 * @dev: device host is attached to
6656 * @flags: host flags
6657 * @ops: port_ops
b03732f0
BK
6658 *
6659 * LOCKING:
6660 * PCI/etc. bus probe sem.
6661 *
6662 */
f3187195 6663/* KILLME - the only user left is ipr */
cca3974e
JG
6664void ata_host_init(struct ata_host *host, struct device *dev,
6665 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6666{
cca3974e
JG
6667 spin_lock_init(&host->lock);
6668 host->dev = dev;
6669 host->flags = flags;
6670 host->ops = ops;
b03732f0
BK
6671}
6672
f3187195
TH
6673/**
6674 * ata_host_register - register initialized ATA host
6675 * @host: ATA host to register
6676 * @sht: template for SCSI host
6677 *
6678 * Register initialized ATA host. @host is allocated using
6679 * ata_host_alloc() and fully initialized by LLD. This function
6680 * starts ports, registers @host with ATA and SCSI layers and
6681 * probe registered devices.
6682 *
6683 * LOCKING:
6684 * Inherited from calling layer (may sleep).
6685 *
6686 * RETURNS:
6687 * 0 on success, -errno otherwise.
6688 */
6689int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6690{
6691 int i, rc;
6692
6693 /* host must have been started */
6694 if (!(host->flags & ATA_HOST_STARTED)) {
6695 dev_printk(KERN_ERR, host->dev,
6696 "BUG: trying to register unstarted host\n");
6697 WARN_ON(1);
6698 return -EINVAL;
6699 }
6700
6701 /* Blow away unused ports. This happens when LLD can't
6702 * determine the exact number of ports to allocate at
6703 * allocation time.
6704 */
6705 for (i = host->n_ports; host->ports[i]; i++)
6706 kfree(host->ports[i]);
6707
6708 /* give ports names and add SCSI hosts */
6709 for (i = 0; i < host->n_ports; i++)
6710 host->ports[i]->print_id = ata_print_id++;
6711
6712 rc = ata_scsi_add_hosts(host, sht);
6713 if (rc)
6714 return rc;
6715
fafbae87
TH
6716 /* associate with ACPI nodes */
6717 ata_acpi_associate(host);
6718
f3187195
TH
6719 /* set cable, sata_spd_limit and report */
6720 for (i = 0; i < host->n_ports; i++) {
6721 struct ata_port *ap = host->ports[i];
f3187195
TH
6722 unsigned long xfer_mask;
6723
6724 /* set SATA cable type if still unset */
6725 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6726 ap->cbl = ATA_CBL_SATA;
6727
6728 /* init sata_spd_limit to the current value */
4fb37a25 6729 sata_link_init_spd(&ap->link);
f3187195 6730
cbcdd875 6731 /* print per-port info to dmesg */
f3187195
TH
6732 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6733 ap->udma_mask);
6734
f3187195 6735 if (!ata_port_is_dummy(ap))
cbcdd875
TH
6736 ata_port_printk(ap, KERN_INFO,
6737 "%cATA max %s %s\n",
a16abc0b 6738 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 6739 ata_mode_string(xfer_mask),
cbcdd875 6740 ap->link.eh_info.desc);
f3187195
TH
6741 else
6742 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6743 }
6744
6745 /* perform each probe synchronously */
6746 DPRINTK("probe begin\n");
6747 for (i = 0; i < host->n_ports; i++) {
6748 struct ata_port *ap = host->ports[i];
6749 int rc;
6750
6751 /* probe */
6752 if (ap->ops->error_handler) {
9af5c9c9 6753 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
6754 unsigned long flags;
6755
6756 ata_port_probe(ap);
6757
6758 /* kick EH for boot probing */
6759 spin_lock_irqsave(ap->lock, flags);
6760
f58229f8
TH
6761 ehi->probe_mask =
6762 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
6763 ehi->action |= ATA_EH_SOFTRESET;
6764 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6765
f4d6d004 6766 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6767 ap->pflags |= ATA_PFLAG_LOADING;
6768 ata_port_schedule_eh(ap);
6769
6770 spin_unlock_irqrestore(ap->lock, flags);
6771
6772 /* wait for EH to finish */
6773 ata_port_wait_eh(ap);
6774 } else {
6775 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6776 rc = ata_bus_probe(ap);
6777 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6778
6779 if (rc) {
6780 /* FIXME: do something useful here?
6781 * Current libata behavior will
6782 * tear down everything when
6783 * the module is removed
6784 * or the h/w is unplugged.
6785 */
6786 }
6787 }
6788 }
6789
6790 /* probes are done, now scan each port's disk(s) */
6791 DPRINTK("host probe begin\n");
6792 for (i = 0; i < host->n_ports; i++) {
6793 struct ata_port *ap = host->ports[i];
6794
1ae46317 6795 ata_scsi_scan_host(ap, 1);
f3187195
TH
6796 }
6797
6798 return 0;
6799}
6800
f5cda257
TH
6801/**
6802 * ata_host_activate - start host, request IRQ and register it
6803 * @host: target ATA host
6804 * @irq: IRQ to request
6805 * @irq_handler: irq_handler used when requesting IRQ
6806 * @irq_flags: irq_flags used when requesting IRQ
6807 * @sht: scsi_host_template to use when registering the host
6808 *
6809 * After allocating an ATA host and initializing it, most libata
6810 * LLDs perform three steps to activate the host - start host,
6811 * request IRQ and register it. This helper takes necessasry
6812 * arguments and performs the three steps in one go.
6813 *
6814 * LOCKING:
6815 * Inherited from calling layer (may sleep).
6816 *
6817 * RETURNS:
6818 * 0 on success, -errno otherwise.
6819 */
6820int ata_host_activate(struct ata_host *host, int irq,
6821 irq_handler_t irq_handler, unsigned long irq_flags,
6822 struct scsi_host_template *sht)
6823{
cbcdd875 6824 int i, rc;
f5cda257
TH
6825
6826 rc = ata_host_start(host);
6827 if (rc)
6828 return rc;
6829
6830 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6831 dev_driver_string(host->dev), host);
6832 if (rc)
6833 return rc;
6834
cbcdd875
TH
6835 for (i = 0; i < host->n_ports; i++)
6836 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6837
f5cda257
TH
6838 rc = ata_host_register(host, sht);
6839 /* if failed, just free the IRQ and leave ports alone */
6840 if (rc)
6841 devm_free_irq(host->dev, irq, host);
6842
6843 return rc;
6844}
6845
720ba126
TH
6846/**
6847 * ata_port_detach - Detach ATA port in prepration of device removal
6848 * @ap: ATA port to be detached
6849 *
6850 * Detach all ATA devices and the associated SCSI devices of @ap;
6851 * then, remove the associated SCSI host. @ap is guaranteed to
6852 * be quiescent on return from this function.
6853 *
6854 * LOCKING:
6855 * Kernel thread context (may sleep).
6856 */
6857void ata_port_detach(struct ata_port *ap)
6858{
6859 unsigned long flags;
41bda9c9 6860 struct ata_link *link;
f58229f8 6861 struct ata_device *dev;
720ba126
TH
6862
6863 if (!ap->ops->error_handler)
c3cf30a9 6864 goto skip_eh;
720ba126
TH
6865
6866 /* tell EH we're leaving & flush EH */
ba6a1308 6867 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6868 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6869 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6870
6871 ata_port_wait_eh(ap);
6872
6873 /* EH is now guaranteed to see UNLOADING, so no new device
6874 * will be attached. Disable all existing devices.
6875 */
ba6a1308 6876 spin_lock_irqsave(ap->lock, flags);
720ba126 6877
41bda9c9
TH
6878 ata_port_for_each_link(link, ap) {
6879 ata_link_for_each_dev(dev, link)
6880 ata_dev_disable(dev);
6881 }
720ba126 6882
ba6a1308 6883 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6884
6885 /* Final freeze & EH. All in-flight commands are aborted. EH
6886 * will be skipped and retrials will be terminated with bad
6887 * target.
6888 */
ba6a1308 6889 spin_lock_irqsave(ap->lock, flags);
720ba126 6890 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6891 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6892
6893 ata_port_wait_eh(ap);
45a66c1c 6894 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 6895
c3cf30a9 6896 skip_eh:
720ba126 6897 /* remove the associated SCSI host */
cca3974e 6898 scsi_remove_host(ap->scsi_host);
720ba126
TH
6899}
6900
0529c159
TH
6901/**
6902 * ata_host_detach - Detach all ports of an ATA host
6903 * @host: Host to detach
6904 *
6905 * Detach all ports of @host.
6906 *
6907 * LOCKING:
6908 * Kernel thread context (may sleep).
6909 */
6910void ata_host_detach(struct ata_host *host)
6911{
6912 int i;
6913
6914 for (i = 0; i < host->n_ports; i++)
6915 ata_port_detach(host->ports[i]);
6916}
6917
1da177e4
LT
6918/**
6919 * ata_std_ports - initialize ioaddr with standard port offsets.
6920 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6921 *
6922 * Utility function which initializes data_addr, error_addr,
6923 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6924 * device_addr, status_addr, and command_addr to standard offsets
6925 * relative to cmd_addr.
6926 *
6927 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6928 */
0baab86b 6929
1da177e4
LT
6930void ata_std_ports(struct ata_ioports *ioaddr)
6931{
6932 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6933 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6934 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6935 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6936 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6937 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6938 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6939 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6940 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6941 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6942}
6943
0baab86b 6944
374b1873
JG
6945#ifdef CONFIG_PCI
6946
1da177e4
LT
6947/**
6948 * ata_pci_remove_one - PCI layer callback for device removal
6949 * @pdev: PCI device that was removed
6950 *
b878ca5d
TH
6951 * PCI layer indicates to libata via this hook that hot-unplug or
6952 * module unload event has occurred. Detach all ports. Resource
6953 * release is handled via devres.
1da177e4
LT
6954 *
6955 * LOCKING:
6956 * Inherited from PCI layer (may sleep).
6957 */
f0d36efd 6958void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6959{
6960 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6961 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6962
b878ca5d 6963 ata_host_detach(host);
1da177e4
LT
6964}
6965
6966/* move to PCI subsystem */
057ace5e 6967int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6968{
6969 unsigned long tmp = 0;
6970
6971 switch (bits->width) {
6972 case 1: {
6973 u8 tmp8 = 0;
6974 pci_read_config_byte(pdev, bits->reg, &tmp8);
6975 tmp = tmp8;
6976 break;
6977 }
6978 case 2: {
6979 u16 tmp16 = 0;
6980 pci_read_config_word(pdev, bits->reg, &tmp16);
6981 tmp = tmp16;
6982 break;
6983 }
6984 case 4: {
6985 u32 tmp32 = 0;
6986 pci_read_config_dword(pdev, bits->reg, &tmp32);
6987 tmp = tmp32;
6988 break;
6989 }
6990
6991 default:
6992 return -EINVAL;
6993 }
6994
6995 tmp &= bits->mask;
6996
6997 return (tmp == bits->val) ? 1 : 0;
6998}
9b847548 6999
6ffa01d8 7000#ifdef CONFIG_PM
3c5100c1 7001void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7002{
7003 pci_save_state(pdev);
4c90d971 7004 pci_disable_device(pdev);
500530f6 7005
4c90d971 7006 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 7007 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7008}
7009
553c4aa6 7010int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7011{
553c4aa6
TH
7012 int rc;
7013
9b847548
JA
7014 pci_set_power_state(pdev, PCI_D0);
7015 pci_restore_state(pdev);
553c4aa6 7016
b878ca5d 7017 rc = pcim_enable_device(pdev);
553c4aa6
TH
7018 if (rc) {
7019 dev_printk(KERN_ERR, &pdev->dev,
7020 "failed to enable device after resume (%d)\n", rc);
7021 return rc;
7022 }
7023
9b847548 7024 pci_set_master(pdev);
553c4aa6 7025 return 0;
500530f6
TH
7026}
7027
3c5100c1 7028int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7029{
cca3974e 7030 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7031 int rc = 0;
7032
cca3974e 7033 rc = ata_host_suspend(host, mesg);
500530f6
TH
7034 if (rc)
7035 return rc;
7036
3c5100c1 7037 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7038
7039 return 0;
7040}
7041
7042int ata_pci_device_resume(struct pci_dev *pdev)
7043{
cca3974e 7044 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7045 int rc;
500530f6 7046
553c4aa6
TH
7047 rc = ata_pci_device_do_resume(pdev);
7048 if (rc == 0)
7049 ata_host_resume(host);
7050 return rc;
9b847548 7051}
6ffa01d8
TH
7052#endif /* CONFIG_PM */
7053
1da177e4
LT
7054#endif /* CONFIG_PCI */
7055
7056
1da177e4
LT
7057static int __init ata_init(void)
7058{
a8601e5f 7059 ata_probe_timeout *= HZ;
1da177e4
LT
7060 ata_wq = create_workqueue("ata");
7061 if (!ata_wq)
7062 return -ENOMEM;
7063
453b07ac
TH
7064 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7065 if (!ata_aux_wq) {
7066 destroy_workqueue(ata_wq);
7067 return -ENOMEM;
7068 }
7069
1da177e4
LT
7070 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7071 return 0;
7072}
7073
7074static void __exit ata_exit(void)
7075{
7076 destroy_workqueue(ata_wq);
453b07ac 7077 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7078}
7079
a4625085 7080subsys_initcall(ata_init);
1da177e4
LT
7081module_exit(ata_exit);
7082
67846b30 7083static unsigned long ratelimit_time;
34af946a 7084static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7085
7086int ata_ratelimit(void)
7087{
7088 int rc;
7089 unsigned long flags;
7090
7091 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7092
7093 if (time_after(jiffies, ratelimit_time)) {
7094 rc = 1;
7095 ratelimit_time = jiffies + (HZ/5);
7096 } else
7097 rc = 0;
7098
7099 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7100
7101 return rc;
7102}
7103
c22daff4
TH
7104/**
7105 * ata_wait_register - wait until register value changes
7106 * @reg: IO-mapped register
7107 * @mask: Mask to apply to read register value
7108 * @val: Wait condition
7109 * @interval_msec: polling interval in milliseconds
7110 * @timeout_msec: timeout in milliseconds
7111 *
7112 * Waiting for some bits of register to change is a common
7113 * operation for ATA controllers. This function reads 32bit LE
7114 * IO-mapped register @reg and tests for the following condition.
7115 *
7116 * (*@reg & mask) != val
7117 *
7118 * If the condition is met, it returns; otherwise, the process is
7119 * repeated after @interval_msec until timeout.
7120 *
7121 * LOCKING:
7122 * Kernel thread context (may sleep)
7123 *
7124 * RETURNS:
7125 * The final register value.
7126 */
7127u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7128 unsigned long interval_msec,
7129 unsigned long timeout_msec)
7130{
7131 unsigned long timeout;
7132 u32 tmp;
7133
7134 tmp = ioread32(reg);
7135
7136 /* Calculate timeout _after_ the first read to make sure
7137 * preceding writes reach the controller before starting to
7138 * eat away the timeout.
7139 */
7140 timeout = jiffies + (timeout_msec * HZ) / 1000;
7141
7142 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7143 msleep(interval_msec);
7144 tmp = ioread32(reg);
7145 }
7146
7147 return tmp;
7148}
7149
dd5b06c4
TH
7150/*
7151 * Dummy port_ops
7152 */
7153static void ata_dummy_noret(struct ata_port *ap) { }
7154static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7155static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7156
7157static u8 ata_dummy_check_status(struct ata_port *ap)
7158{
7159 return ATA_DRDY;
7160}
7161
7162static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7163{
7164 return AC_ERR_SYSTEM;
7165}
7166
7167const struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7168 .check_status = ata_dummy_check_status,
7169 .check_altstatus = ata_dummy_check_status,
7170 .dev_select = ata_noop_dev_select,
7171 .qc_prep = ata_noop_qc_prep,
7172 .qc_issue = ata_dummy_qc_issue,
7173 .freeze = ata_dummy_noret,
7174 .thaw = ata_dummy_noret,
7175 .error_handler = ata_dummy_noret,
7176 .post_internal_cmd = ata_dummy_qc_noret,
7177 .irq_clear = ata_dummy_noret,
7178 .port_start = ata_dummy_ret0,
7179 .port_stop = ata_dummy_noret,
7180};
7181
21b0ad4f
TH
7182const struct ata_port_info ata_dummy_port_info = {
7183 .port_ops = &ata_dummy_port_ops,
7184};
7185
1da177e4
LT
7186/*
7187 * libata is essentially a library of internal helper functions for
7188 * low-level ATA host controller drivers. As such, the API/ABI is
7189 * likely to change as new drivers are added and updated.
7190 * Do not depend on ABI/API stability.
7191 */
7192
e9c83914
TH
7193EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7194EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7195EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7196EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7197EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7198EXPORT_SYMBOL_GPL(ata_std_bios_param);
7199EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7200EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7201EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7202EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7203EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7204EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7205EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7206EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7207EXPORT_SYMBOL_GPL(ata_sg_init);
7208EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7209EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7210EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7211EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7212EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7213EXPORT_SYMBOL_GPL(ata_tf_load);
7214EXPORT_SYMBOL_GPL(ata_tf_read);
7215EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7216EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7217EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7218EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7219EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7220EXPORT_SYMBOL_GPL(ata_check_status);
7221EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7222EXPORT_SYMBOL_GPL(ata_exec_command);
7223EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7224EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7225EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7226EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7227EXPORT_SYMBOL_GPL(ata_data_xfer);
7228EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7229EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7230EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7231EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7232EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7233EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7234EXPORT_SYMBOL_GPL(ata_bmdma_start);
7235EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7236EXPORT_SYMBOL_GPL(ata_bmdma_status);
7237EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7238EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7239EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7240EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7241EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7242EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7243EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7244EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7245EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7246EXPORT_SYMBOL_GPL(sata_link_debounce);
7247EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4
LT
7248EXPORT_SYMBOL_GPL(sata_phy_reset);
7249EXPORT_SYMBOL_GPL(__sata_phy_reset);
7250EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7251EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7252EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7253EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7254EXPORT_SYMBOL_GPL(sata_std_hardreset);
7255EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7256EXPORT_SYMBOL_GPL(ata_dev_classify);
7257EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7258EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7259EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7260EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7261EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 7262EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7263EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7264EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7265EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7266EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7267EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7268EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7269EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7270EXPORT_SYMBOL_GPL(sata_scr_valid);
7271EXPORT_SYMBOL_GPL(sata_scr_read);
7272EXPORT_SYMBOL_GPL(sata_scr_write);
7273EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7274EXPORT_SYMBOL_GPL(ata_link_online);
7275EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7276#ifdef CONFIG_PM
cca3974e
JG
7277EXPORT_SYMBOL_GPL(ata_host_suspend);
7278EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7279#endif /* CONFIG_PM */
6a62a04d
TH
7280EXPORT_SYMBOL_GPL(ata_id_string);
7281EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 7282EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
1da177e4
LT
7283EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7284
1bc4ccff 7285EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
7286EXPORT_SYMBOL_GPL(ata_timing_compute);
7287EXPORT_SYMBOL_GPL(ata_timing_merge);
7288
1da177e4
LT
7289#ifdef CONFIG_PCI
7290EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7291EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7292EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7293EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7294EXPORT_SYMBOL_GPL(ata_pci_init_one);
7295EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7296#ifdef CONFIG_PM
500530f6
TH
7297EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7298EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7299EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7300EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7301#endif /* CONFIG_PM */
67951ade
AC
7302EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7303EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7304#endif /* CONFIG_PCI */
9b847548 7305
3af9a77a
TH
7306EXPORT_SYMBOL_GPL(sata_pmp_read_init_tf);
7307EXPORT_SYMBOL_GPL(sata_pmp_read_val);
7308EXPORT_SYMBOL_GPL(sata_pmp_write_init_tf);
7309EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7310EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7311EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7312EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7313
b64bbc39
TH
7314EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7315EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7316EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7317EXPORT_SYMBOL_GPL(ata_port_desc);
7318#ifdef CONFIG_PCI
7319EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7320#endif /* CONFIG_PCI */
ece1d636 7321EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 7322EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7323EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7324EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7325EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7326EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7327EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7328EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7329EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7330EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7331EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7332EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7333EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7334
7335EXPORT_SYMBOL_GPL(ata_cable_40wire);
7336EXPORT_SYMBOL_GPL(ata_cable_80wire);
7337EXPORT_SYMBOL_GPL(ata_cable_unknown);
7338EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 0.935854 seconds and 5 git commands to generate.