sata_nv: propagate ata_pci_device_do_resume return value
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5
JG
62#define DRV_VERSION "2.10" /* must be exactly four chars */
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
1da177e4
LT
74
75static unsigned int ata_unique_id = 1;
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
c3c013a2
JG
88int libata_fua = 0;
89module_param_named(fua, libata_fua, int, 0444);
90MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91
a8601e5f
AM
92static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
93module_param(ata_probe_timeout, int, 0444);
94MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
95
1da177e4
LT
96MODULE_AUTHOR("Jeff Garzik");
97MODULE_DESCRIPTION("Library module for ATA devices");
98MODULE_LICENSE("GPL");
99MODULE_VERSION(DRV_VERSION);
100
0baab86b 101
1da177e4
LT
102/**
103 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
104 * @tf: Taskfile to convert
105 * @fis: Buffer into which data will output
106 * @pmp: Port multiplier port
107 *
108 * Converts a standard ATA taskfile to a Serial ATA
109 * FIS structure (Register - Host to Device).
110 *
111 * LOCKING:
112 * Inherited from caller.
113 */
114
057ace5e 115void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
116{
117 fis[0] = 0x27; /* Register - Host to Device FIS */
118 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
119 bit 7 indicates Command FIS */
120 fis[2] = tf->command;
121 fis[3] = tf->feature;
122
123 fis[4] = tf->lbal;
124 fis[5] = tf->lbam;
125 fis[6] = tf->lbah;
126 fis[7] = tf->device;
127
128 fis[8] = tf->hob_lbal;
129 fis[9] = tf->hob_lbam;
130 fis[10] = tf->hob_lbah;
131 fis[11] = tf->hob_feature;
132
133 fis[12] = tf->nsect;
134 fis[13] = tf->hob_nsect;
135 fis[14] = 0;
136 fis[15] = tf->ctl;
137
138 fis[16] = 0;
139 fis[17] = 0;
140 fis[18] = 0;
141 fis[19] = 0;
142}
143
144/**
145 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
146 * @fis: Buffer from which data will be input
147 * @tf: Taskfile to output
148 *
e12a1be6 149 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
150 *
151 * LOCKING:
152 * Inherited from caller.
153 */
154
057ace5e 155void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
156{
157 tf->command = fis[2]; /* status */
158 tf->feature = fis[3]; /* error */
159
160 tf->lbal = fis[4];
161 tf->lbam = fis[5];
162 tf->lbah = fis[6];
163 tf->device = fis[7];
164
165 tf->hob_lbal = fis[8];
166 tf->hob_lbam = fis[9];
167 tf->hob_lbah = fis[10];
168
169 tf->nsect = fis[12];
170 tf->hob_nsect = fis[13];
171}
172
8cbd6df1
AL
173static const u8 ata_rw_cmds[] = {
174 /* pio multi */
175 ATA_CMD_READ_MULTI,
176 ATA_CMD_WRITE_MULTI,
177 ATA_CMD_READ_MULTI_EXT,
178 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
179 0,
180 0,
181 0,
182 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
183 /* pio */
184 ATA_CMD_PIO_READ,
185 ATA_CMD_PIO_WRITE,
186 ATA_CMD_PIO_READ_EXT,
187 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
188 0,
189 0,
190 0,
191 0,
8cbd6df1
AL
192 /* dma */
193 ATA_CMD_READ,
194 ATA_CMD_WRITE,
195 ATA_CMD_READ_EXT,
9a3dccc4
TH
196 ATA_CMD_WRITE_EXT,
197 0,
198 0,
199 0,
200 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 201};
1da177e4
LT
202
203/**
8cbd6df1 204 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
205 * @tf: command to examine and configure
206 * @dev: device tf belongs to
1da177e4 207 *
2e9edbf8 208 * Examine the device configuration and tf->flags to calculate
8cbd6df1 209 * the proper read/write commands and protocol to use.
1da177e4
LT
210 *
211 * LOCKING:
212 * caller.
213 */
bd056d7e 214static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 215{
9a3dccc4 216 u8 cmd;
1da177e4 217
9a3dccc4 218 int index, fua, lba48, write;
2e9edbf8 219
9a3dccc4 220 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
221 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
222 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 223
8cbd6df1
AL
224 if (dev->flags & ATA_DFLAG_PIO) {
225 tf->protocol = ATA_PROT_PIO;
9a3dccc4 226 index = dev->multi_count ? 0 : 8;
bd056d7e 227 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
228 /* Unable to use DMA due to host limitation */
229 tf->protocol = ATA_PROT_PIO;
0565c26d 230 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
231 } else {
232 tf->protocol = ATA_PROT_DMA;
9a3dccc4 233 index = 16;
8cbd6df1 234 }
1da177e4 235
9a3dccc4
TH
236 cmd = ata_rw_cmds[index + fua + lba48 + write];
237 if (cmd) {
238 tf->command = cmd;
239 return 0;
240 }
241 return -1;
1da177e4
LT
242}
243
35b649fe
TH
244/**
245 * ata_tf_read_block - Read block address from ATA taskfile
246 * @tf: ATA taskfile of interest
247 * @dev: ATA device @tf belongs to
248 *
249 * LOCKING:
250 * None.
251 *
252 * Read block address from @tf. This function can handle all
253 * three address formats - LBA, LBA48 and CHS. tf->protocol and
254 * flags select the address format to use.
255 *
256 * RETURNS:
257 * Block address read from @tf.
258 */
259u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
260{
261 u64 block = 0;
262
263 if (tf->flags & ATA_TFLAG_LBA) {
264 if (tf->flags & ATA_TFLAG_LBA48) {
265 block |= (u64)tf->hob_lbah << 40;
266 block |= (u64)tf->hob_lbam << 32;
267 block |= tf->hob_lbal << 24;
268 } else
269 block |= (tf->device & 0xf) << 24;
270
271 block |= tf->lbah << 16;
272 block |= tf->lbam << 8;
273 block |= tf->lbal;
274 } else {
275 u32 cyl, head, sect;
276
277 cyl = tf->lbam | (tf->lbah << 8);
278 head = tf->device & 0xf;
279 sect = tf->lbal;
280
281 block = (cyl * dev->heads + head) * dev->sectors + sect;
282 }
283
284 return block;
285}
286
bd056d7e
TH
287/**
288 * ata_build_rw_tf - Build ATA taskfile for given read/write request
289 * @tf: Target ATA taskfile
290 * @dev: ATA device @tf belongs to
291 * @block: Block address
292 * @n_block: Number of blocks
293 * @tf_flags: RW/FUA etc...
294 * @tag: tag
295 *
296 * LOCKING:
297 * None.
298 *
299 * Build ATA taskfile @tf for read/write request described by
300 * @block, @n_block, @tf_flags and @tag on @dev.
301 *
302 * RETURNS:
303 *
304 * 0 on success, -ERANGE if the request is too large for @dev,
305 * -EINVAL if the request is invalid.
306 */
307int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
308 u64 block, u32 n_block, unsigned int tf_flags,
309 unsigned int tag)
310{
311 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
312 tf->flags |= tf_flags;
313
314 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
70e6ad0c
TH
315 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
316 likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
317 /* yay, NCQ */
318 if (!lba_48_ok(block, n_block))
319 return -ERANGE;
320
321 tf->protocol = ATA_PROT_NCQ;
322 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
323
324 if (tf->flags & ATA_TFLAG_WRITE)
325 tf->command = ATA_CMD_FPDMA_WRITE;
326 else
327 tf->command = ATA_CMD_FPDMA_READ;
328
329 tf->nsect = tag << 3;
330 tf->hob_feature = (n_block >> 8) & 0xff;
331 tf->feature = n_block & 0xff;
332
333 tf->hob_lbah = (block >> 40) & 0xff;
334 tf->hob_lbam = (block >> 32) & 0xff;
335 tf->hob_lbal = (block >> 24) & 0xff;
336 tf->lbah = (block >> 16) & 0xff;
337 tf->lbam = (block >> 8) & 0xff;
338 tf->lbal = block & 0xff;
339
340 tf->device = 1 << 6;
341 if (tf->flags & ATA_TFLAG_FUA)
342 tf->device |= 1 << 7;
343 } else if (dev->flags & ATA_DFLAG_LBA) {
344 tf->flags |= ATA_TFLAG_LBA;
345
346 if (lba_28_ok(block, n_block)) {
347 /* use LBA28 */
348 tf->device |= (block >> 24) & 0xf;
349 } else if (lba_48_ok(block, n_block)) {
350 if (!(dev->flags & ATA_DFLAG_LBA48))
351 return -ERANGE;
352
353 /* use LBA48 */
354 tf->flags |= ATA_TFLAG_LBA48;
355
356 tf->hob_nsect = (n_block >> 8) & 0xff;
357
358 tf->hob_lbah = (block >> 40) & 0xff;
359 tf->hob_lbam = (block >> 32) & 0xff;
360 tf->hob_lbal = (block >> 24) & 0xff;
361 } else
362 /* request too large even for LBA48 */
363 return -ERANGE;
364
365 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
366 return -EINVAL;
367
368 tf->nsect = n_block & 0xff;
369
370 tf->lbah = (block >> 16) & 0xff;
371 tf->lbam = (block >> 8) & 0xff;
372 tf->lbal = block & 0xff;
373
374 tf->device |= ATA_LBA;
375 } else {
376 /* CHS */
377 u32 sect, head, cyl, track;
378
379 /* The request -may- be too large for CHS addressing. */
380 if (!lba_28_ok(block, n_block))
381 return -ERANGE;
382
383 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
384 return -EINVAL;
385
386 /* Convert LBA to CHS */
387 track = (u32)block / dev->sectors;
388 cyl = track / dev->heads;
389 head = track % dev->heads;
390 sect = (u32)block % dev->sectors + 1;
391
392 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
393 (u32)block, track, cyl, head, sect);
394
395 /* Check whether the converted CHS can fit.
396 Cylinder: 0-65535
397 Head: 0-15
398 Sector: 1-255*/
399 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
400 return -ERANGE;
401
402 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
403 tf->lbal = sect;
404 tf->lbam = cyl;
405 tf->lbah = cyl >> 8;
406 tf->device |= head;
407 }
408
409 return 0;
410}
411
cb95d562
TH
412/**
413 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
414 * @pio_mask: pio_mask
415 * @mwdma_mask: mwdma_mask
416 * @udma_mask: udma_mask
417 *
418 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
419 * unsigned int xfer_mask.
420 *
421 * LOCKING:
422 * None.
423 *
424 * RETURNS:
425 * Packed xfer_mask.
426 */
427static unsigned int ata_pack_xfermask(unsigned int pio_mask,
428 unsigned int mwdma_mask,
429 unsigned int udma_mask)
430{
431 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
432 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
433 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
434}
435
c0489e4e
TH
436/**
437 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
438 * @xfer_mask: xfer_mask to unpack
439 * @pio_mask: resulting pio_mask
440 * @mwdma_mask: resulting mwdma_mask
441 * @udma_mask: resulting udma_mask
442 *
443 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
444 * Any NULL distination masks will be ignored.
445 */
446static void ata_unpack_xfermask(unsigned int xfer_mask,
447 unsigned int *pio_mask,
448 unsigned int *mwdma_mask,
449 unsigned int *udma_mask)
450{
451 if (pio_mask)
452 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
453 if (mwdma_mask)
454 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
455 if (udma_mask)
456 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
457}
458
cb95d562 459static const struct ata_xfer_ent {
be9a50c8 460 int shift, bits;
cb95d562
TH
461 u8 base;
462} ata_xfer_tbl[] = {
463 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
464 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
465 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
466 { -1, },
467};
468
469/**
470 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
471 * @xfer_mask: xfer_mask of interest
472 *
473 * Return matching XFER_* value for @xfer_mask. Only the highest
474 * bit of @xfer_mask is considered.
475 *
476 * LOCKING:
477 * None.
478 *
479 * RETURNS:
480 * Matching XFER_* value, 0 if no match found.
481 */
482static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
483{
484 int highbit = fls(xfer_mask) - 1;
485 const struct ata_xfer_ent *ent;
486
487 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
488 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
489 return ent->base + highbit - ent->shift;
490 return 0;
491}
492
493/**
494 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
495 * @xfer_mode: XFER_* of interest
496 *
497 * Return matching xfer_mask for @xfer_mode.
498 *
499 * LOCKING:
500 * None.
501 *
502 * RETURNS:
503 * Matching xfer_mask, 0 if no match found.
504 */
505static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
506{
507 const struct ata_xfer_ent *ent;
508
509 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
510 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
511 return 1 << (ent->shift + xfer_mode - ent->base);
512 return 0;
513}
514
515/**
516 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
517 * @xfer_mode: XFER_* of interest
518 *
519 * Return matching xfer_shift for @xfer_mode.
520 *
521 * LOCKING:
522 * None.
523 *
524 * RETURNS:
525 * Matching xfer_shift, -1 if no match found.
526 */
527static int ata_xfer_mode2shift(unsigned int xfer_mode)
528{
529 const struct ata_xfer_ent *ent;
530
531 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
532 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
533 return ent->shift;
534 return -1;
535}
536
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* ordered to match the bit layout of xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	if (bit < 0 || bit >= (int)ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[bit];
}
582
/* Map a SATA link speed number (1-based) to a human-readable string. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	/* spd is 1-based; 0 and out-of-range values are unknown */
	if (spd && idx < ARRAY_SIZE(spd_str))
		return spd_str[idx];
	return "<unknown>";
}
594
3373efd8 595void ata_dev_disable(struct ata_device *dev)
0b8efb0a 596{
0dd4b21f 597 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 598 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
0b8efb0a
TH
599 dev->class++;
600 }
601}
602
1da177e4 603/**
0d5ff566 604 * ata_devchk - PATA device presence detection
1da177e4
LT
605 * @ap: ATA channel to examine
606 * @device: Device to examine (starting at zero)
607 *
608 * This technique was originally described in
609 * Hale Landis's ATADRVR (www.ata-atapi.com), and
610 * later found its way into the ATA/ATAPI spec.
611 *
612 * Write a pattern to the ATA shadow registers,
613 * and if a device is present, it will respond by
614 * correctly storing and echoing back the
615 * ATA shadow register contents.
616 *
617 * LOCKING:
618 * caller.
619 */
620
0d5ff566 621static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
622{
623 struct ata_ioports *ioaddr = &ap->ioaddr;
624 u8 nsect, lbal;
625
626 ap->ops->dev_select(ap, device);
627
0d5ff566
TH
628 iowrite8(0x55, ioaddr->nsect_addr);
629 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 630
0d5ff566
TH
631 iowrite8(0xaa, ioaddr->nsect_addr);
632 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 633
0d5ff566
TH
634 iowrite8(0x55, ioaddr->nsect_addr);
635 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 636
0d5ff566
TH
637 nsect = ioread8(ioaddr->nsect_addr);
638 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
639
640 if ((nsect == 0x55) && (lbal == 0xaa))
641 return 1; /* we found a device */
642
643 return 0; /* nothing found */
644}
645
1da177e4
LT
646/**
647 * ata_dev_classify - determine device type based on ATA-spec signature
648 * @tf: ATA taskfile register set for device to be identified
649 *
650 * Determine from taskfile register contents whether a device is
651 * ATA or ATAPI, as per "Signature and persistence" section
652 * of ATA/PI spec (volume 1, sect 5.14).
653 *
654 * LOCKING:
655 * None.
656 *
657 * RETURNS:
658 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
659 * the event of failure.
660 */
661
057ace5e 662unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
663{
664 /* Apple's open source Darwin code hints that some devices only
665 * put a proper signature into the LBA mid/high registers,
666 * So, we only check those. It's sufficient for uniqueness.
667 */
668
669 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
670 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
671 DPRINTK("found ATA device by sig\n");
672 return ATA_DEV_ATA;
673 }
674
675 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
676 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
677 DPRINTK("found ATAPI device by sig\n");
678 return ATA_DEV_ATAPI;
679 }
680
681 DPRINTK("unknown device\n");
682 return ATA_DEV_UNKNOWN;
683}
684
685/**
686 * ata_dev_try_classify - Parse returned ATA device signature
687 * @ap: ATA channel to examine
688 * @device: Device to examine (starting at zero)
b4dc7623 689 * @r_err: Value of error register on completion
1da177e4
LT
690 *
691 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
692 * an ATA/ATAPI-defined set of values is placed in the ATA
693 * shadow registers, indicating the results of device detection
694 * and diagnostics.
695 *
696 * Select the ATA device, and read the values from the ATA shadow
697 * registers. Then parse according to the Error register value,
698 * and the spec-defined values examined by ata_dev_classify().
699 *
700 * LOCKING:
701 * caller.
b4dc7623
TH
702 *
703 * RETURNS:
704 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
705 */
706
b4dc7623
TH
707static unsigned int
708ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 709{
1da177e4
LT
710 struct ata_taskfile tf;
711 unsigned int class;
712 u8 err;
713
714 ap->ops->dev_select(ap, device);
715
716 memset(&tf, 0, sizeof(tf));
717
1da177e4 718 ap->ops->tf_read(ap, &tf);
0169e284 719 err = tf.feature;
b4dc7623
TH
720 if (r_err)
721 *r_err = err;
1da177e4 722
93590859
AC
723 /* see if device passed diags: if master then continue and warn later */
724 if (err == 0 && device == 0)
725 /* diagnostic fail : do nothing _YET_ */
726 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
727 else if (err == 1)
1da177e4
LT
728 /* do nothing */ ;
729 else if ((device == 0) && (err == 0x81))
730 /* do nothing */ ;
731 else
b4dc7623 732 return ATA_DEV_NONE;
1da177e4 733
b4dc7623 734 /* determine if device is ATA or ATAPI */
1da177e4 735 class = ata_dev_classify(&tf);
b4dc7623 736
1da177e4 737 if (class == ATA_DEV_UNKNOWN)
b4dc7623 738 return ATA_DEV_NONE;
1da177e4 739 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
740 return ATA_DEV_NONE;
741 return class;
1da177e4
LT
742}
743
744/**
6a62a04d 745 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
746 * @id: IDENTIFY DEVICE results we will examine
747 * @s: string into which data is output
748 * @ofs: offset into identify device page
749 * @len: length of string to return. must be an even number.
750 *
751 * The strings in the IDENTIFY DEVICE page are broken up into
752 * 16-bit chunks. Run through the string, and output each
753 * 8-bit chunk linearly, regardless of platform.
754 *
755 * LOCKING:
756 * caller.
757 */
758
6a62a04d
TH
759void ata_id_string(const u16 *id, unsigned char *s,
760 unsigned int ofs, unsigned int len)
1da177e4
LT
761{
762 unsigned int c;
763
764 while (len > 0) {
765 c = id[ofs] >> 8;
766 *s = c;
767 s++;
768
769 c = id[ofs] & 0xff;
770 *s = c;
771 s++;
772
773 ofs++;
774 len -= 2;
775 }
776}
777
0e949ff3 778/**
6a62a04d 779 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
780 * @id: IDENTIFY DEVICE results we will examine
781 * @s: string into which data is output
782 * @ofs: offset into identify device page
783 * @len: length of string to return. must be an odd number.
784 *
6a62a04d 785 * This function is identical to ata_id_string except that it
0e949ff3
TH
786 * trims trailing spaces and terminates the resulting string with
787 * null. @len must be actual maximum length (even number) + 1.
788 *
789 * LOCKING:
790 * caller.
791 */
6a62a04d
TH
792void ata_id_c_string(const u16 *id, unsigned char *s,
793 unsigned int ofs, unsigned int len)
0e949ff3
TH
794{
795 unsigned char *p;
796
797 WARN_ON(!(len & 1));
798
6a62a04d 799 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
800
801 p = s + strnlen(s, len - 1);
802 while (p > s && p[-1] == ' ')
803 p--;
804 *p = '\0';
805}
0baab86b 806
2940740b
TH
807static u64 ata_id_n_sectors(const u16 *id)
808{
809 if (ata_id_has_lba(id)) {
810 if (ata_id_has_lba48(id))
811 return ata_id_u64(id, 100);
812 else
813 return ata_id_u32(id, 60);
814 } else {
815 if (ata_id_current_chs_valid(id))
816 return ata_id_u32(id, 57);
817 else
818 return id[1] * id[3] * id[6];
819 }
820}
821
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty -- some controllers need no selection step */
}
837
0baab86b 838
1da177e4
LT
839/**
840 * ata_std_dev_select - Select device 0/1 on ATA bus
841 * @ap: ATA channel to manipulate
842 * @device: ATA device (numbered from zero) to select
843 *
844 * Use the method defined in the ATA specification to
845 * make either device 0, or device 1, active on the
0baab86b
EF
846 * ATA channel. Works with both PIO and MMIO.
847 *
848 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
849 *
850 * LOCKING:
851 * caller.
852 */
853
854void ata_std_dev_select (struct ata_port *ap, unsigned int device)
855{
856 u8 tmp;
857
858 if (device == 0)
859 tmp = ATA_DEVICE_OBS;
860 else
861 tmp = ATA_DEVICE_OBS | ATA_DEV1;
862
0d5ff566 863 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
864 ata_pause(ap); /* needed; also flushes, for mmio */
865}
866
867/**
868 * ata_dev_select - Select device 0/1 on ATA bus
869 * @ap: ATA channel to manipulate
870 * @device: ATA device (numbered from zero) to select
871 * @wait: non-zero to wait for Status register BSY bit to clear
872 * @can_sleep: non-zero if context allows sleeping
873 *
874 * Use the method defined in the ATA specification to
875 * make either device 0, or device 1, active on the
876 * ATA channel.
877 *
878 * This is a high-level version of ata_std_dev_select(),
879 * which additionally provides the services of inserting
880 * the proper pauses and status polling, where needed.
881 *
882 * LOCKING:
883 * caller.
884 */
885
886void ata_dev_select(struct ata_port *ap, unsigned int device,
887 unsigned int wait, unsigned int can_sleep)
888{
88574551 889 if (ata_msg_probe(ap))
0dd4b21f 890 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
88574551 891 "device %u, wait %u\n", ap->id, device, wait);
1da177e4
LT
892
893 if (wait)
894 ata_wait_idle(ap);
895
896 ap->ops->dev_select(ap, device);
897
898 if (wait) {
899 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
900 msleep(150);
901 ata_wait_idle(ap);
902 }
903}
904
905/**
906 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 907 * @id: IDENTIFY DEVICE page to dump
1da177e4 908 *
0bd3300a
TH
909 * Dump selected 16-bit words from the given IDENTIFY DEVICE
910 * page.
1da177e4
LT
911 *
912 * LOCKING:
913 * caller.
914 */
915
0bd3300a 916static inline void ata_dump_id(const u16 *id)
1da177e4
LT
917{
918 DPRINTK("49==0x%04x "
919 "53==0x%04x "
920 "63==0x%04x "
921 "64==0x%04x "
922 "75==0x%04x \n",
0bd3300a
TH
923 id[49],
924 id[53],
925 id[63],
926 id[64],
927 id[75]);
1da177e4
LT
928 DPRINTK("80==0x%04x "
929 "81==0x%04x "
930 "82==0x%04x "
931 "83==0x%04x "
932 "84==0x%04x \n",
0bd3300a
TH
933 id[80],
934 id[81],
935 id[82],
936 id[83],
937 id[84]);
1da177e4
LT
938 DPRINTK("88==0x%04x "
939 "93==0x%04x\n",
0bd3300a
TH
940 id[88],
941 id[93]);
1da177e4
LT
942}
943
cb95d562
TH
944/**
945 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
946 * @id: IDENTIFY data to compute xfer mask from
947 *
948 * Compute the xfermask for this device. This is not as trivial
949 * as it seems if we must consider early devices correctly.
950 *
951 * FIXME: pre IDE drive timing (do we care ?).
952 *
953 * LOCKING:
954 * None.
955 *
956 * RETURNS:
957 * Computed xfermask
958 */
959static unsigned int ata_id_xfermask(const u16 *id)
960{
961 unsigned int pio_mask, mwdma_mask, udma_mask;
962
963 /* Usual case. Word 53 indicates word 64 is valid */
964 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
965 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
966 pio_mask <<= 3;
967 pio_mask |= 0x7;
968 } else {
969 /* If word 64 isn't valid then Word 51 high byte holds
970 * the PIO timing number for the maximum. Turn it into
971 * a mask.
972 */
7a0f1c8a 973 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
974 if (mode < 5) /* Valid PIO range */
975 pio_mask = (2 << mode) - 1;
976 else
977 pio_mask = 1;
cb95d562
TH
978
979 /* But wait.. there's more. Design your standards by
980 * committee and you too can get a free iordy field to
981 * process. However its the speeds not the modes that
982 * are supported... Note drivers using the timing API
983 * will get this right anyway
984 */
985 }
986
987 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 988
b352e57d
AC
989 if (ata_id_is_cfa(id)) {
990 /*
991 * Process compact flash extended modes
992 */
993 int pio = id[163] & 0x7;
994 int dma = (id[163] >> 3) & 7;
995
996 if (pio)
997 pio_mask |= (1 << 5);
998 if (pio > 1)
999 pio_mask |= (1 << 6);
1000 if (dma)
1001 mwdma_mask |= (1 << 3);
1002 if (dma > 1)
1003 mwdma_mask |= (1 << 4);
1004 }
1005
fb21f0d0
TH
1006 udma_mask = 0;
1007 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1008 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1009
1010 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1011}
1012
86e45b6b
TH
1013/**
1014 * ata_port_queue_task - Queue port_task
1015 * @ap: The ata_port to queue port_task for
e2a7f77a 1016 * @fn: workqueue function to be scheduled
65f27f38 1017 * @data: data for @fn to use
e2a7f77a 1018 * @delay: delay time for workqueue function
86e45b6b
TH
1019 *
1020 * Schedule @fn(@data) for execution after @delay jiffies using
1021 * port_task. There is one port_task per port and it's the
1022 * user(low level driver)'s responsibility to make sure that only
1023 * one task is active at any given time.
1024 *
1025 * libata core layer takes care of synchronization between
1026 * port_task and EH. ata_port_queue_task() may be ignored for EH
1027 * synchronization.
1028 *
1029 * LOCKING:
1030 * Inherited from caller.
1031 */
65f27f38 1032void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1033 unsigned long delay)
1034{
1035 int rc;
1036
b51e9e5d 1037 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
86e45b6b
TH
1038 return;
1039
65f27f38
DH
1040 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1041 ap->port_task_data = data;
86e45b6b 1042
52bad64d 1043 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1044
1045 /* rc == 0 means that another user is using port task */
1046 WARN_ON(rc == 0);
1047}
1048
1049/**
1050 * ata_port_flush_task - Flush port_task
1051 * @ap: The ata_port to flush port_task for
1052 *
1053 * After this function completes, port_task is guranteed not to
1054 * be running or scheduled.
1055 *
1056 * LOCKING:
1057 * Kernel thread context (may sleep)
1058 */
1059void ata_port_flush_task(struct ata_port *ap)
1060{
1061 unsigned long flags;
1062
1063 DPRINTK("ENTER\n");
1064
ba6a1308 1065 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 1066 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 1067 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b
TH
1068
1069 DPRINTK("flush #1\n");
1070 flush_workqueue(ata_wq);
1071
1072 /*
1073 * At this point, if a task is running, it's guaranteed to see
1074 * the FLUSH flag; thus, it will never queue pio tasks again.
1075 * Cancel and flush.
1076 */
1077 if (!cancel_delayed_work(&ap->port_task)) {
0dd4b21f 1078 if (ata_msg_ctl(ap))
88574551
TH
1079 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1080 __FUNCTION__);
86e45b6b
TH
1081 flush_workqueue(ata_wq);
1082 }
1083
ba6a1308 1084 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 1085 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 1086 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b 1087
0dd4b21f
BP
1088 if (ata_msg_ctl(ap))
1089 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1090}
1091
7102d230 1092static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1093{
77853bf2 1094 struct completion *waiting = qc->private_data;
a2a7a662 1095
a2a7a662 1096 complete(waiting);
a2a7a662
TH
1097}
1098
1099/**
2432697b 1100 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1101 * @dev: Device to which the command is sent
1102 * @tf: Taskfile registers for the command and the result
d69cf37d 1103 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
2432697b
TH
1105 * @sg: sg list for the data buffer of the command
1106 * @n_elem: Number of sg entries
a2a7a662
TH
1107 *
1108 * Executes libata internal command with timeout. @tf contains
1109 * command on entry and result on return. Timeout and error
1110 * conditions are reported via return value. No recovery action
1111 * is taken after a command times out. It's caller's duty to
1112 * clean up after timeout.
1113 *
1114 * LOCKING:
1115 * None. Should be called with kernel context, might sleep.
551e8889
TH
1116 *
1117 * RETURNS:
1118 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1119 */
2432697b
TH
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free here; anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the port's active-command bookkeeping so the
	 * internal command runs alone; restored in the finish-up path */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total transfer length is the sum of all sg entries */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* a failed qc must carry an error mask; synthesize one if the
	 * LLDD forgot to set it */
	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	/* free the qc and restore the preempted command state */
	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1266
2432697b 1267/**
33480a0e 1268 * ata_exec_internal - execute libata internal command
2432697b
TH
1269 * @dev: Device to which the command is sent
1270 * @tf: Taskfile registers for the command and the result
1271 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
1273 * @buf: Data buffer of the command
1274 * @buflen: Length of data buffer
1275 *
1276 * Wrapper around ata_exec_internal_sg() which takes simple
1277 * buffer instead of sg list.
1278 *
1279 * LOCKING:
1280 * None. Should be called with kernel context, might sleep.
1281 *
1282 * RETURNS:
1283 * Zero on success, AC_ERR_* mask on failure
1284 */
1285unsigned ata_exec_internal(struct ata_device *dev,
1286 struct ata_taskfile *tf, const u8 *cdb,
1287 int dma_dir, void *buf, unsigned int buflen)
1288{
33480a0e
TH
1289 struct scatterlist *psg = NULL, sg;
1290 unsigned int n_elem = 0;
2432697b 1291
33480a0e
TH
1292 if (dma_dir != DMA_NONE) {
1293 WARN_ON(!buf);
1294 sg_init_one(&sg, buf, buflen);
1295 psg = &sg;
1296 n_elem++;
1297 }
2432697b 1298
33480a0e 1299 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1300}
1301
977e6b9f
TH
1302/**
1303 * ata_do_simple_cmd - execute simple internal command
1304 * @dev: Device to which the command is sent
1305 * @cmd: Opcode to execute
1306 *
1307 * Execute a 'simple' command, that only consists of the opcode
1308 * 'cmd' itself, without filling any other registers
1309 *
1310 * LOCKING:
1311 * Kernel thread context (may sleep).
1312 *
1313 * RETURNS:
1314 * Zero on success, AC_ERR_* mask on failure
e58eb583 1315 */
77b08fb5 1316unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1317{
1318 struct ata_taskfile tf;
e58eb583
TH
1319
1320 ata_tf_init(dev, &tf);
1321
1322 tf.command = cmd;
1323 tf.flags |= ATA_TFLAG_DEVICE;
1324 tf.protocol = ATA_PROT_NODATA;
1325
977e6b9f 1326 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1327}
1328
1bc4ccff
AC
1329/**
1330 * ata_pio_need_iordy - check if iordy needed
1331 * @adev: ATA device
1332 *
1333 * Check if the current speed of the device requires IORDY. Used
1334 * by various controllers for chip configuration.
1335 */
1336
1337unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1338{
1339 int pio;
1340 int speed = adev->pio_mode - XFER_PIO_0;
1341
1342 if (speed < 2)
1343 return 0;
1344 if (speed > 2)
1345 return 1;
2e9edbf8 1346
1bc4ccff
AC
1347 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1348
1349 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1350 pio = adev->id[ATA_ID_EIDE_PIO];
1351 /* Is the speed faster than the drive allows non IORDY ? */
1352 if (pio) {
1353 /* This is cycle times not frequency - watch the logic! */
1354 if (pio > 240) /* PIO2 is 240nS per cycle */
1355 return 1;
1356 return 0;
1357 }
1358 }
1359 return 0;
1360}
1361
1da177e4 1362/**
49016aca 1363 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1364 * @dev: target device
1365 * @p_class: pointer to class of the target device (may be changed)
bff04647 1366 * @flags: ATA_READID_* flags
fe635c7e 1367 * @id: buffer to read IDENTIFY data into
1da177e4 1368 *
49016aca
TH
1369 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1370 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1371 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1372 * for pre-ATA4 drives.
1da177e4
LT
1373 *
1374 * LOCKING:
49016aca
TH
1375 * Kernel thread context (may sleep)
1376 *
1377 * RETURNS:
1378 * 0 on success, -errno otherwise.
1da177e4 1379 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;	/* human-readable failure cause for the log */
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		/* NODEV_HINT means the polled presence check found no
		 * device; report -ENOENT rather than an I/O error */
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	/* the ID data must agree with the class we asked for */
	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1478
3373efd8 1479static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1480{
3373efd8 1481 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1482}
1483
a6e6ce8e
TH
1484static void ata_dev_config_ncq(struct ata_device *dev,
1485 char *desc, size_t desc_sz)
1486{
1487 struct ata_port *ap = dev->ap;
1488 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1489
1490 if (!ata_id_has_ncq(dev->id)) {
1491 desc[0] = '\0';
1492 return;
1493 }
6919a0a6
AC
1494 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1495 snprintf(desc, desc_sz, "NCQ (not used)");
1496 return;
1497 }
a6e6ce8e 1498 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1499 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1500 dev->flags |= ATA_DFLAG_NCQ;
1501 }
1502
1503 if (hdepth >= ddepth)
1504 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1505 else
1506 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1507}
1508
e6d902a3
BK
1509static void ata_set_port_max_cmd_len(struct ata_port *ap)
1510{
1511 int i;
1512
cca3974e
JG
1513 if (ap->scsi_host) {
1514 unsigned int len = 0;
1515
e6d902a3 1516 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1517 len = max(len, ap->device[i].cdb_len);
1518
1519 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1520 }
1521}
1522
49016aca 1523/**
ffeae418 1524 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1525 * @dev: Target device to configure
1526 *
1527 * Configure @dev according to @dev->id. Generic and low-level
1528 * driver specific fixups are also applied.
49016aca
TH
1529 *
1530 * LOCKING:
ffeae418
TH
1531 * Kernel thread context (may sleep)
1532 *
1533 * RETURNS:
1534 * 0 on success, -errno otherwise
49016aca 1535 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* disabled devices need no configuration */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
					       ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV_OFS,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD_OFS,
				sizeof(modelbuf));

		/* word 59 bit 8 set => low byte is the valid multi count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters for >128GiB drives */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads    = id[55];
				dev->sectors  = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1738
1739/**
1740 * ata_bus_probe - Reset and probe ATA bus
1741 * @ap: Bus to probe
1742 *
0cba632b
JG
1743 * Master ATA bus probing function. Initiates a hardware-dependent
1744 * bus reset, then attempts to identify any devices found on
1745 * the bus.
1746 *
1da177e4 1747 * LOCKING:
0cba632b 1748 * PCI/etc. bus probe sem.
1da177e4
LT
1749 *
1750 * RETURNS:
96072e69 1751 * Zero on success, negative errno otherwise.
1da177e4
LT
1752 */
1753
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device retry budget */
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* trust the class phy_reset() reported only if the port
		 * wasn't disabled by it */
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* re-enable the port in case phy_reset() disabled it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* devices with an exhausted retry budget stay UNKNOWN */
		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		/* mode-setting failure: lower the xfermask before retrying */
		down_xfermask = 1;
		goto fail;
	}

	/* success if at least one device came up */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* decide how to retry based on the failure; @dev points at the
	 * device whose probe failed */
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		/* hard failure: give up on this device immediately */
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		/* retries exhausted: force lowest mode and disable */
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
1853
1854/**
0cba632b
JG
1855 * ata_port_probe - Mark port as enabled
1856 * @ap: Port for which we indicate enablement
1da177e4 1857 *
0cba632b
JG
1858 * Modify @ap data structure such that the system
1859 * thinks that the entire port is enabled.
1860 *
cca3974e 1861 * LOCKING: host lock, or some other form of
0cba632b 1862 * serialization.
1da177e4
LT
1863 */
1864
void ata_port_probe(struct ata_port *ap)
{
	/* Clearing DISABLED marks the whole port usable again. */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
1869
3be680b7
TH
1870/**
1871 * sata_print_link_status - Print SATA link status
1872 * @ap: SATA port to printk link status about
1873 *
1874 * This function prints link speed and status of a SATA link.
1875 *
1876 * LOCKING:
1877 * None.
1878 */
1879static void sata_print_link_status(struct ata_port *ap)
1880{
6d5f9732 1881 u32 sstatus, scontrol, tmp;
3be680b7 1882
81952c54 1883 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1884 return;
81952c54 1885 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1886
81952c54 1887 if (ata_port_online(ap)) {
3be680b7 1888 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1889 ata_port_printk(ap, KERN_INFO,
1890 "SATA link up %s (SStatus %X SControl %X)\n",
1891 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1892 } else {
f15a1daf
TH
1893 ata_port_printk(ap, KERN_INFO,
1894 "SATA link down (SStatus %X SControl %X)\n",
1895 sstatus, scontrol);
3be680b7
TH
1896 }
1897}
1898
1da177e4 1899/**
780a87f7
JG
1900 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1901 * @ap: SATA port associated with target SATA PHY.
1da177e4 1902 *
780a87f7
JG
1903 * This function issues commands to standard SATA Sxxx
1904 * PHY registers, to wake up the phy (and device), and
1905 * clear any reset condition.
1da177e4
LT
1906 *
1907 * LOCKING:
0cba632b 1908 * PCI/etc. bus probe sem.
1da177e4
LT
1909 *
1910 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5s DET wait */

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		/* DET == 1 means device present but phy not yet up */
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device to come out of BSY after the reset */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1953
1954/**
780a87f7
JG
1955 * sata_phy_reset - Reset SATA bus.
1956 * @ap: SATA port associated with target SATA PHY.
1da177e4 1957 *
780a87f7
JG
1958 * This function resets the SATA bus, and then probes
1959 * the bus for devices.
1da177e4
LT
1960 *
1961 * LOCKING:
0cba632b 1962 * PCI/etc. bus probe sem.
1da177e4
LT
1963 *
1964 */
1965void sata_phy_reset(struct ata_port *ap)
1966{
1967 __sata_phy_reset(ap);
198e0fed 1968 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1969 return;
1970 ata_bus_reset(ap);
1971}
1972
ebdfca6e
AC
1973/**
1974 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1975 * @adev: device
1976 *
1977 * Obtain the other device on the same cable, or if none is
1978 * present NULL is returned
1979 */
2e9edbf8 1980
3373efd8 1981struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1982{
3373efd8 1983 struct ata_port *ap = adev->ap;
ebdfca6e 1984 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 1985 if (!ata_dev_enabled(pair))
ebdfca6e
AC
1986 return NULL;
1987 return pair;
1988}
1989
1da177e4 1990/**
780a87f7
JG
1991 * ata_port_disable - Disable port.
1992 * @ap: Port to be disabled.
1da177e4 1993 *
780a87f7
JG
1994 * Modify @ap data structure such that the system
1995 * thinks that the entire port is disabled, and should
1996 * never attempt to probe or communicate with devices
1997 * on this port.
1998 *
cca3974e 1999 * LOCKING: host lock, or some other form of
780a87f7 2000 * serialization.
1da177e4
LT
2001 */
2002
2003void ata_port_disable(struct ata_port *ap)
2004{
2005 ap->device[0].class = ATA_DEV_NONE;
2006 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2007 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2008}
2009
1c3fae4d 2010/**
3c567b7d 2011 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2012 * @ap: Port to adjust SATA spd limit for
2013 *
2014 * Adjust SATA spd limit of @ap downward. Note that this
2015 * function only adjusts the limit. The change must be applied
3c567b7d 2016 * using sata_set_spd().
1c3fae4d
TH
2017 *
2018 * LOCKING:
2019 * Inherited from caller.
2020 *
2021 * RETURNS:
2022 * 0 on success, negative errno on failure
2023 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	/* drop the highest bit of the current limit mask; a mask of
	 * <= 1 has nowhere lower to go */
	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* also cap the mask below the speed currently negotiated
	 * (SStatus bits 7:4); spd <= 1 means no usable lower speed */
	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2054
3c567b7d 2055static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2056{
2057 u32 spd, limit;
2058
2059 if (ap->sata_spd_limit == UINT_MAX)
2060 limit = 0;
2061 else
2062 limit = fls(ap->sata_spd_limit);
2063
2064 spd = (*scontrol >> 4) & 0xf;
2065 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2066
2067 return spd != limit;
2068}
2069
2070/**
3c567b7d 2071 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2072 * @ap: Port in question
2073 *
2074 * Test whether the spd limit in SControl matches
2075 * @ap->sata_spd_limit. This function is used to determine
2076 * whether hardreset is necessary to apply SATA spd
2077 * configuration.
2078 *
2079 * LOCKING:
2080 * Inherited from caller.
2081 *
2082 * RETURNS:
2083 * 1 if SATA spd configuration is needed, 0 otherwise.
2084 */
3c567b7d 2085int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2086{
2087 u32 scontrol;
2088
81952c54 2089 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2090 return 0;
2091
3c567b7d 2092 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2093}
2094
2095/**
3c567b7d 2096 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2097 * @ap: Port to set SATA spd for
2098 *
2099 * Set SATA spd of @ap according to sata_spd_limit.
2100 *
2101 * LOCKING:
2102 * Inherited from caller.
2103 *
2104 * RETURNS:
2105 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2106 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2107 */
3c567b7d 2108int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2109{
2110 u32 scontrol;
81952c54 2111 int rc;
1c3fae4d 2112
81952c54
TH
2113 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2114 return rc;
1c3fae4d 2115
3c567b7d 2116 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2117 return 0;
2118
81952c54
TH
2119 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2120 return rc;
2121
1c3fae4d
TH
2122 return 1;
2123}
2124
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Column order presumably follows struct ata_timing:
 * mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 * -- TODO confirm against the struct declaration in the header.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }	/* sentinel: terminates ata_timing_find_mode() scans */
};

/* Round @v up to whole @unit counts; ENOUGH assumes v > 0. */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
/* Like ENOUGH but maps 0 (unused timing field) to 0.  NOTE: evaluates
 * @v twice -- do not pass expressions with side effects. */
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
2176
/* Convert timing @t from nanoseconds to bus clock counts of period @T
 * (@UT for the udma field), rounding up.  Zero fields stay zero (EZ).
 * @q may alias @t -- each output field depends only on the same input
 * field, so in-place quantization is safe.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2188
/* Merge timings @a and @b into @m, taking the slower (larger) value of
 * each field selected by the @what bitmask.  @m may alias @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2201
2202static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2203{
2204 const struct ata_timing *t;
2205
2206 for (t = ata_timing; t->mode != speed; t++)
91190758 2207 if (t->mode == 0xFF)
452503f9 2208 return NULL;
2e9edbf8 2209 return t;
452503f9
AC
2210}
2211
/* Compute bus-clock timing @t for @adev running at transfer mode @speed,
 * with bus clock period @T (and @UT for UDMA) in picoseconds -- TODO
 * confirm units against callers; the *1000 in ata_timing_quantize()
 * suggests ns-to-ps conversion.
 *
 * Returns 0 on success, -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* IORDY-less PIO modes use the plain cycle word */
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table timing vs drive-reported minimum */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once to fold in the device's current PIO timing */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2276
cf176e1a
TH
2277/**
2278 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2279 * @dev: Device to adjust xfer masks
2280 * @force_pio0: Force PIO0
2281 *
2282 * Adjust xfer masks of @dev downward. Note that this function
2283 * does not apply the change. Invoking ata_set_mode() afterwards
2284 * will apply the limit.
2285 *
2286 * LOCKING:
2287 * Inherited from caller.
2288 *
2289 * RETURNS:
2290 * 0 on success, negative errno on failure
2291 */
3373efd8 2292int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2293{
2294 unsigned long xfer_mask;
2295 int highbit;
2296
2297 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2298 dev->udma_mask);
2299
2300 if (!xfer_mask)
2301 goto fail;
2302 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2303 if (xfer_mask & ATA_MASK_UDMA)
2304 xfer_mask &= ~ATA_MASK_MWDMA;
2305
2306 highbit = fls(xfer_mask) - 1;
2307 xfer_mask &= ~(1 << highbit);
2308 if (force_pio0)
2309 xfer_mask &= 1 << ATA_SHIFT_PIO;
2310 if (!xfer_mask)
2311 goto fail;
2312
2313 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2314 &dev->udma_mask);
2315
f15a1daf
TH
2316 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2317 ata_mode_string(xfer_mask));
cf176e1a
TH
2318
2319 return 0;
2320
2321 fail:
2322 return -EINVAL;
2323}
2324
/* Issue SET FEATURES - XFER MODE to @dev for its already-chosen
 * xfer_mode/xfer_shift, then revalidate the device.  The revalidation
 * is bracketed with ATA_EHI_POST_SETMODE so EH knows the context.
 *
 * Returns 0 on success, -EIO if the SET FEATURES command failed, or the
 * error from ata_dev_revalidate().
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the selected shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* revalidate with POST_SETMODE set so EH treats ID changes leniently */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2355
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* split the combined mask back into per-type modes */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		/* a device with no usable PIO mode is a hard error */
		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	/* on failure, report the device current in the loop above */
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2462
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2482
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  A status of 0xff is treated as
 *	"no device present" and aborts the wait.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* phase 1: poll quietly until the "impatience" timeout */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* phase 2: keep polling (plain status reads) until overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
2537
/* Post-reset settle: wait for BSY to clear on the devices that were
 * found by ata_devchk() (bits in @devmask), then reselect device 0.
 * Device 1 is considered present only once its nsect/lbal registers
 * read back the 1/1 signature within ATA_TMOUT_BOOT.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2579
/* Perform ATA software reset (SRST pulse on the control register) and
 * run post-reset processing for the devices in @devmask.
 * Returns 0; a bus reading 0xFF (no pulldown on D7, i.e. empty bus)
 * skips post-reset processing.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2617
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 is there */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2704
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 (stuck?) only counts as stable after timeout */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
2763
2764/**
2765 * sata_phy_resume - resume SATA phy
2766 * @ap: ATA port to resume SATA phy for
2767 * @params: timing parameters { interval, duratinon, timeout } in msec
2768 *
2769 * Resume SATA phy of @ap and debounce it.
2770 *
2771 * LOCKING:
2772 * Kernel thread context (may sleep)
2773 *
2774 * RETURNS:
2775 * 0 on success, -errno on failure.
2776 */
2777int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2778{
2779 u32 scontrol;
81952c54
TH
2780 int rc;
2781
2782 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2783 return rc;
7a7921e8 2784
852ee16a 2785 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2786
2787 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2788 return rc;
7a7921e8 2789
d7bb4cc7
TH
2790 /* Some PHYs react badly if SStatus is pounded immediately
2791 * after resuming. Delay 200ms before debouncing.
2792 */
2793 msleep(200);
7a7921e8 2794
d7bb4cc7 2795 return sata_phy_debounce(ap, params);
7a7921e8
TH
2796}
2797
/* Wait out the remainder of the ATA_SPINUP_WAIT window (measured from
 * the EH hotplug timestamp) to give a freshly hotplugged drive time to
 * spin up.  On SATA the phy is debounced first; if the link is offline
 * there is nothing to wait for.
 */
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;	/* round up to whole secs */

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
2826
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.  Decides whether a
 *	hardreset is required (link resume on HRST_TO_RESUME ports),
 *	waits for spinup on hotplug, and otherwise resumes the phy and
 *	waits for !BSY.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
2877
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, skip the reset entirely */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2931
/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the phy while changing SPD */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
2989
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3042
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError (write-1-to-clear: write back what we read) */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts (new-style EH drivers do this themselves) */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3090
623a3128
TH
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	/* NUL-terminated copies of the fixed-width IDENTIFY strings,
	 * index 0 = old device, index 1 = new device */
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
	u64 new_n_sectors;

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	/* capacity check only makes sense for ATA disks */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)dev->n_sectors,
			       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}
3149
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* per-port scratch buffer for the freshly-read IDENTIFY page */
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* same device confirmed; commit the new IDENTIFY data */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3197
6919a0a6
AC
/* One blacklist entry: a model string, an optional firmware revision
 * (NULL matches any revision), and the ATA_HORKAGE_* flags to apply. */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

/* Table of known-broken devices, scanned by ata_device_blacklisted().
 * Terminated by an all-zero sentinel entry. */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64", 	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
        { "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};
2e9edbf8 3248
6919a0a6 3249unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3250{
8bfa79fc
TH
3251 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3252 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3253 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3254
8bfa79fc
TH
3255 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3256 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3257
6919a0a6 3258 while (ad->model_num) {
8bfa79fc 3259 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3260 if (ad->model_rev == NULL)
3261 return ad->horkage;
8bfa79fc 3262 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3263 return ad->horkage;
f4b15fef 3264 }
6919a0a6 3265 ad++;
f4b15fef 3266 }
1da177e4
LT
3267 return 0;
3268}
3269
6919a0a6
AC
3270static int ata_dma_blacklisted(const struct ata_device *dev)
3271{
3272 /* We don't support polling DMA.
3273 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3274 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3275 */
3276 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3277 (dev->flags & ATA_DFLAG_CDB_INTR))
3278 return 1;
3279 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3280}
3281
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here. Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);


	/* intersect with the device's own capabilities */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one device may own DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* give the LLDD a final say over the allowed modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	/* store the result back into the per-type device masks */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3350
1da177e4
LT
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* the desired transfer mode is passed in the sector count register */
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
3385
8bf62ece
AL
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
3423
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may have been reduced to 0 by pad trimming
		 * in ata_sg_setup(); only unmap what was mapped */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3481
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg element into PRD entries that never
		 * cross a 64K boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3533/**
3534 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3535 * @qc: Metadata associated with taskfile to check
3536 *
780a87f7
JG
3537 * Allow low-level driver to filter ATA PACKET commands, returning
3538 * a status indicating whether or not it is OK to use DMA for the
3539 * supplied PACKET command.
3540 *
1da177e4 3541 * LOCKING:
cca3974e 3542 * spin_lock_irqsave(host lock)
0cba632b 3543 *
1da177e4
LT
3544 * RETURNS: 0 when ATAPI DMA can be used
3545 * nonzero otherwise
3546 */
3547int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3548{
3549 struct ata_port *ap = qc->ap;
3550 int rc = 0; /* Assume ATAPI DMA is OK by default */
3551
3552 if (ap->ops->check_atapi_dma)
3553 rc = ap->ops->check_atapi_dma(qc);
3554
3555 return rc;
3556}
3557/**
3558 * ata_qc_prep - Prepare taskfile for submission
3559 * @qc: Metadata associated with taskfile to be prepared
3560 *
780a87f7
JG
3561 * Prepare ATA taskfile for submission.
3562 *
1da177e4 3563 * LOCKING:
cca3974e 3564 * spin_lock_irqsave(host lock)
1da177e4
LT
3565 */
3566void ata_qc_prep(struct ata_queued_cmd *qc)
3567{
3568 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3569 return;
3570
3571 ata_fill_sg(qc);
3572}
3573
e46834cd
BK
/* no-op qc_prep for controllers that need no PRD/taskfile preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3575
0cba632b
JG
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	/* use the embedded single-entry sgent as a one-element table */
	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}
3601
0cba632b
JG
3602/**
3603 * ata_sg_init - Associate command with scatter-gather table.
3604 * @qc: Command to be associated
3605 * @sg: Scatter-gather table.
3606 * @n_elem: Number of elements in s/g table.
3607 *
3608 * Initialize the data-related elements of queued_cmd @qc
3609 * to point to a scatter-gather table @sg, containing @n_elem
3610 * elements.
3611 *
3612 * LOCKING:
cca3974e 3613 * spin_lock_irqsave(host lock)
0cba632b
JG
3614 */
3615
1da177e4
LT
3616void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3617 unsigned int n_elem)
3618{
3619 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3620 qc->__sg = sg;
1da177e4 3621 qc->n_elem = n_elem;
cedc9a47 3622 qc->orig_n_elem = n_elem;
1da177e4
LT
3623}
3624
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slice of the port's pre-allocated pad buffer */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-fill the pad buffer with the trailing bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* whole buffer fit in the pad entry; nothing left to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3693
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slice of the port's pre-allocated pad buffer */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, pre-fill the pad buffer with the trailing bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last sg element if padding consumed it entirely */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3778
0baab86b 3779/**
c893a3ae 3780 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3781 * @buf: Buffer to swap
3782 * @buf_words: Number of 16-bit words in buffer.
3783 *
3784 * Swap halves of 16-bit words if needed to convert from
3785 * little-endian byte order to native cpu byte order, or
3786 * vice-versa.
3787 *
3788 * LOCKING:
6f0ef4fa 3789 * Inherited from caller.
0baab86b 3790 */
1da177e4
LT
3791void swap_buf_le16(u16 *buf, unsigned int buf_words)
3792{
3793#ifdef __BIG_ENDIAN
3794 unsigned int i;
3795
3796 for (i = 0; i < buf_words; i++)
3797 buf[i] = le16_to_cpu(buf[i]);
3798#endif /* __BIG_ENDIAN */
3799}
3800
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any: the data register is 16 bits
	 * wide, so the odd byte is moved through an aligned bounce word */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3839
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* keep local interrupts off for the duration of the PIO transfer */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
3861
3862
6ae4cfb5
AL
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the request => move the HSM to its final state */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* advance the transfer cursor by one sector */
	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	/* move to the next sg element once this one is exhausted */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 3919
07f6f7d0
AL
/**
 *	ata_pio_sectors - Transfer one or many 512-byte sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many ATA_SECT_SIZE of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* transfer up to multi_count sectors per DRQ, capped by
		 * what remains of the request */
		nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}
3946
c71c1857
AL
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on the command's data protocol */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
3982
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk satisfies (or exceeds) the request => final HSM state */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one word at a time through the bounce word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance the transfer cursor */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* loop until the device's byte count for this DRQ is satisfied */
	if (bytes)
		goto next_sg;
}
4077
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* device-reported byte count for this DRQ block */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* protocol violation by the device => HSM error */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4127
4128/**
c234fb00
AL
4129 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4130 * @ap: the target ata_port
4131 * @qc: qc on going
1da177e4 4132 *
c234fb00
AL
4133 * RETURNS:
4134 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4135 */
c234fb00
AL
4136
4137static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4138{
c234fb00
AL
4139 if (qc->tf.flags & ATA_TFLAG_POLLING)
4140 return 1;
1da177e4 4141
c234fb00
AL
4142 if (ap->hsm_task_state == HSM_ST_FIRST) {
4143 if (qc->tf.protocol == ATA_PROT_PIO &&
4144 (qc->tf.flags & ATA_TFLAG_WRITE))
4145 return 1;
1da177e4 4146
c234fb00
AL
4147 if (is_atapi_taskfile(&qc->tf) &&
4148 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4149 return 1;
fe79e683
AL
4150 }
4151
c234fb00
AL
4152 return 0;
4153}
1da177e4 4154
c17ea20d
TH
4155/**
4156 * ata_hsm_qc_complete - finish a qc running on standard HSM
4157 * @qc: Command to complete
4158 * @in_wq: 1 if called from workqueue, 0 otherwise
4159 *
4160 * Finish @qc which is running on standard HSM.
4161 *
4162 * LOCKING:
cca3974e 4163 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4164 * Otherwise, none on entry and grabs host lock.
4165 */
4166static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4167{
4168 struct ata_port *ap = qc->ap;
4169 unsigned long flags;
4170
4171 if (ap->ops->error_handler) {
4172 if (in_wq) {
ba6a1308 4173 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4174
cca3974e
JG
4175 /* EH might have kicked in while host lock is
4176 * released.
c17ea20d
TH
4177 */
4178 qc = ata_qc_from_tag(ap, qc->tag);
4179 if (qc) {
4180 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4181 ap->ops->irq_on(ap);
c17ea20d
TH
4182 ata_qc_complete(qc);
4183 } else
4184 ata_port_freeze(ap);
4185 }
4186
ba6a1308 4187 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4188 } else {
4189 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4190 ata_qc_complete(qc);
4191 else
4192 ata_port_freeze(ap);
4193 }
4194 } else {
4195 if (in_wq) {
ba6a1308 4196 spin_lock_irqsave(ap->lock, flags);
83625006 4197 ap->ops->irq_on(ap);
c17ea20d 4198 ata_qc_complete(qc);
ba6a1308 4199 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4200 } else
4201 ata_qc_complete(qc);
4202 }
1da177e4 4203
c81e29b4 4204 ata_altstatus(ap); /* flush */
c17ea20d
TH
4205}
4206
bb5cb290
AL
4207/**
4208 * ata_hsm_move - move the HSM to the next state.
4209 * @ap: the target ata_port
4210 * @qc: qc on going
4211 * @status: current device status
4212 * @in_wq: 1 if called from workqueue, 0 otherwise
4213 *
4214 * RETURNS:
4215 * 1 when poll next status needed, 0 otherwise.
4216 */
9a1004d0
TH
4217int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4218 u8 status, int in_wq)
e2cec771 4219{
bb5cb290
AL
4220 unsigned long flags = 0;
4221 int poll_next;
4222
6912ccd5
AL
4223 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4224
bb5cb290
AL
4225 /* Make sure ata_qc_issue_prot() does not throw things
4226 * like DMA polling into the workqueue. Notice that
4227 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4228 */
c234fb00 4229 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4230
e2cec771 4231fsm_start:
999bb6f4
AL
4232 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4233 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4234
e2cec771
AL
4235 switch (ap->hsm_task_state) {
4236 case HSM_ST_FIRST:
bb5cb290
AL
4237 /* Send first data block or PACKET CDB */
4238
4239 /* If polling, we will stay in the work queue after
4240 * sending the data. Otherwise, interrupt handler
4241 * takes over after sending the data.
4242 */
4243 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4244
e2cec771 4245 /* check device status */
3655d1d3
AL
4246 if (unlikely((status & ATA_DRQ) == 0)) {
4247 /* handle BSY=0, DRQ=0 as error */
4248 if (likely(status & (ATA_ERR | ATA_DF)))
4249 /* device stops HSM for abort/error */
4250 qc->err_mask |= AC_ERR_DEV;
4251 else
4252 /* HSM violation. Let EH handle this */
4253 qc->err_mask |= AC_ERR_HSM;
4254
14be71f4 4255 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4256 goto fsm_start;
1da177e4
LT
4257 }
4258
71601958
AL
4259 /* Device should not ask for data transfer (DRQ=1)
4260 * when it finds something wrong.
eee6c32f
AL
4261 * We ignore DRQ here and stop the HSM by
4262 * changing hsm_task_state to HSM_ST_ERR and
4263 * let the EH abort the command or reset the device.
71601958
AL
4264 */
4265 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4266 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4267 ap->id, status);
3655d1d3 4268 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4269 ap->hsm_task_state = HSM_ST_ERR;
4270 goto fsm_start;
71601958 4271 }
1da177e4 4272
bb5cb290
AL
4273 /* Send the CDB (atapi) or the first data block (ata pio out).
4274 * During the state transition, interrupt handler shouldn't
4275 * be invoked before the data transfer is complete and
4276 * hsm_task_state is changed. Hence, the following locking.
4277 */
4278 if (in_wq)
ba6a1308 4279 spin_lock_irqsave(ap->lock, flags);
1da177e4 4280
bb5cb290
AL
4281 if (qc->tf.protocol == ATA_PROT_PIO) {
4282 /* PIO data out protocol.
4283 * send first data block.
4284 */
0565c26d 4285
bb5cb290
AL
4286 /* ata_pio_sectors() might change the state
4287 * to HSM_ST_LAST. so, the state is changed here
4288 * before ata_pio_sectors().
4289 */
4290 ap->hsm_task_state = HSM_ST;
4291 ata_pio_sectors(qc);
4292 ata_altstatus(ap); /* flush */
4293 } else
4294 /* send CDB */
4295 atapi_send_cdb(ap, qc);
4296
4297 if (in_wq)
ba6a1308 4298 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4299
4300 /* if polling, ata_pio_task() handles the rest.
4301 * otherwise, interrupt handler takes over from here.
4302 */
e2cec771 4303 break;
1c848984 4304
e2cec771
AL
4305 case HSM_ST:
4306 /* complete command or read/write the data register */
4307 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4308 /* ATAPI PIO protocol */
4309 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4310 /* No more data to transfer or device error.
4311 * Device error will be tagged in HSM_ST_LAST.
4312 */
e2cec771
AL
4313 ap->hsm_task_state = HSM_ST_LAST;
4314 goto fsm_start;
4315 }
1da177e4 4316
71601958
AL
4317 /* Device should not ask for data transfer (DRQ=1)
4318 * when it finds something wrong.
eee6c32f
AL
4319 * We ignore DRQ here and stop the HSM by
4320 * changing hsm_task_state to HSM_ST_ERR and
4321 * let the EH abort the command or reset the device.
71601958
AL
4322 */
4323 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4324 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4325 ap->id, status);
3655d1d3 4326 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4327 ap->hsm_task_state = HSM_ST_ERR;
4328 goto fsm_start;
71601958 4329 }
1da177e4 4330
e2cec771 4331 atapi_pio_bytes(qc);
7fb6ec28 4332
e2cec771
AL
4333 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4334 /* bad ireason reported by device */
4335 goto fsm_start;
1da177e4 4336
e2cec771
AL
4337 } else {
4338 /* ATA PIO protocol */
4339 if (unlikely((status & ATA_DRQ) == 0)) {
4340 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4341 if (likely(status & (ATA_ERR | ATA_DF)))
4342 /* device stops HSM for abort/error */
4343 qc->err_mask |= AC_ERR_DEV;
4344 else
55a8e2c8
TH
4345 /* HSM violation. Let EH handle this.
4346 * Phantom devices also trigger this
4347 * condition. Mark hint.
4348 */
4349 qc->err_mask |= AC_ERR_HSM |
4350 AC_ERR_NODEV_HINT;
3655d1d3 4351
e2cec771
AL
4352 ap->hsm_task_state = HSM_ST_ERR;
4353 goto fsm_start;
4354 }
1da177e4 4355
eee6c32f
AL
4356 /* For PIO reads, some devices may ask for
4357 * data transfer (DRQ=1) alone with ERR=1.
4358 * We respect DRQ here and transfer one
4359 * block of junk data before changing the
4360 * hsm_task_state to HSM_ST_ERR.
4361 *
4362 * For PIO writes, ERR=1 DRQ=1 doesn't make
4363 * sense since the data block has been
4364 * transferred to the device.
71601958
AL
4365 */
4366 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4367 /* data might be corrputed */
4368 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4369
4370 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4371 ata_pio_sectors(qc);
4372 ata_altstatus(ap);
4373 status = ata_wait_idle(ap);
4374 }
4375
3655d1d3
AL
4376 if (status & (ATA_BUSY | ATA_DRQ))
4377 qc->err_mask |= AC_ERR_HSM;
4378
eee6c32f
AL
4379 /* ata_pio_sectors() might change the
4380 * state to HSM_ST_LAST. so, the state
4381 * is changed after ata_pio_sectors().
4382 */
4383 ap->hsm_task_state = HSM_ST_ERR;
4384 goto fsm_start;
71601958
AL
4385 }
4386
e2cec771
AL
4387 ata_pio_sectors(qc);
4388
4389 if (ap->hsm_task_state == HSM_ST_LAST &&
4390 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4391 /* all data read */
4392 ata_altstatus(ap);
52a32205 4393 status = ata_wait_idle(ap);
e2cec771
AL
4394 goto fsm_start;
4395 }
4396 }
4397
4398 ata_altstatus(ap); /* flush */
bb5cb290 4399 poll_next = 1;
1da177e4
LT
4400 break;
4401
14be71f4 4402 case HSM_ST_LAST:
6912ccd5
AL
4403 if (unlikely(!ata_ok(status))) {
4404 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4405 ap->hsm_task_state = HSM_ST_ERR;
4406 goto fsm_start;
4407 }
4408
4409 /* no more data to transfer */
4332a771
AL
4410 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4411 ap->id, qc->dev->devno, status);
e2cec771 4412
6912ccd5
AL
4413 WARN_ON(qc->err_mask);
4414
e2cec771 4415 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4416
e2cec771 4417 /* complete taskfile transaction */
c17ea20d 4418 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4419
4420 poll_next = 0;
1da177e4
LT
4421 break;
4422
14be71f4 4423 case HSM_ST_ERR:
e2cec771
AL
4424 /* make sure qc->err_mask is available to
4425 * know what's wrong and recover
4426 */
4427 WARN_ON(qc->err_mask == 0);
4428
4429 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4430
999bb6f4 4431 /* complete taskfile transaction */
c17ea20d 4432 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4433
4434 poll_next = 0;
e2cec771
AL
4435 break;
4436 default:
bb5cb290 4437 poll_next = 0;
6912ccd5 4438 BUG();
1da177e4
LT
4439 }
4440
bb5cb290 4441 return poll_next;
1da177e4
LT
4442}
4443
65f27f38 4444static void ata_pio_task(struct work_struct *work)
8061f5f0 4445{
65f27f38
DH
4446 struct ata_port *ap =
4447 container_of(work, struct ata_port, port_task.work);
4448 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4449 u8 status;
a1af3734 4450 int poll_next;
8061f5f0 4451
7fb6ec28 4452fsm_start:
a1af3734 4453 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4454
a1af3734
AL
4455 /*
4456 * This is purely heuristic. This is a fast path.
4457 * Sometimes when we enter, BSY will be cleared in
4458 * a chk-status or two. If not, the drive is probably seeking
4459 * or something. Snooze for a couple msecs, then
4460 * chk-status again. If still busy, queue delayed work.
4461 */
4462 status = ata_busy_wait(ap, ATA_BUSY, 5);
4463 if (status & ATA_BUSY) {
4464 msleep(2);
4465 status = ata_busy_wait(ap, ATA_BUSY, 10);
4466 if (status & ATA_BUSY) {
31ce6dae 4467 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4468 return;
4469 }
8061f5f0
TH
4470 }
4471
a1af3734
AL
4472 /* move the HSM */
4473 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4474
a1af3734
AL
4475 /* another command or interrupt handler
4476 * may be running at this point.
4477 */
4478 if (poll_next)
7fb6ec28 4479 goto fsm_start;
8061f5f0
TH
4480}
4481
1da177e4
LT
4482/**
4483 * ata_qc_new - Request an available ATA command, for queueing
4484 * @ap: Port associated with device @dev
4485 * @dev: Device from whom we request an available command structure
4486 *
4487 * LOCKING:
0cba632b 4488 * None.
1da177e4
LT
4489 */
4490
4491static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4492{
4493 struct ata_queued_cmd *qc = NULL;
4494 unsigned int i;
4495
e3180499 4496 /* no command while frozen */
b51e9e5d 4497 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4498 return NULL;
4499
2ab7db1f
TH
4500 /* the last tag is reserved for internal command. */
4501 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4502 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4503 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4504 break;
4505 }
4506
4507 if (qc)
4508 qc->tag = i;
4509
4510 return qc;
4511}
4512
4513/**
4514 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4515 * @dev: Device from whom we request an available command structure
4516 *
4517 * LOCKING:
0cba632b 4518 * None.
1da177e4
LT
4519 */
4520
3373efd8 4521struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4522{
3373efd8 4523 struct ata_port *ap = dev->ap;
1da177e4
LT
4524 struct ata_queued_cmd *qc;
4525
4526 qc = ata_qc_new(ap);
4527 if (qc) {
1da177e4
LT
4528 qc->scsicmd = NULL;
4529 qc->ap = ap;
4530 qc->dev = dev;
1da177e4 4531
2c13b7ce 4532 ata_qc_reinit(qc);
1da177e4
LT
4533 }
4534
4535 return qc;
4536}
4537
1da177e4
LT
4538/**
4539 * ata_qc_free - free unused ata_queued_cmd
4540 * @qc: Command to complete
4541 *
4542 * Designed to free unused ata_queued_cmd object
4543 * in case something prevents using it.
4544 *
4545 * LOCKING:
cca3974e 4546 * spin_lock_irqsave(host lock)
1da177e4
LT
4547 */
4548void ata_qc_free(struct ata_queued_cmd *qc)
4549{
4ba946e9
TH
4550 struct ata_port *ap = qc->ap;
4551 unsigned int tag;
4552
a4631474 4553 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4554
4ba946e9
TH
4555 qc->flags = 0;
4556 tag = qc->tag;
4557 if (likely(ata_tag_valid(tag))) {
4ba946e9 4558 qc->tag = ATA_TAG_POISON;
6cec4a39 4559 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4560 }
1da177e4
LT
4561}
4562
76014427 4563void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4564{
dedaf2b0
TH
4565 struct ata_port *ap = qc->ap;
4566
a4631474
TH
4567 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4568 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4569
4570 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4571 ata_sg_clean(qc);
4572
7401abf2 4573 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4574 if (qc->tf.protocol == ATA_PROT_NCQ)
4575 ap->sactive &= ~(1 << qc->tag);
4576 else
4577 ap->active_tag = ATA_TAG_POISON;
7401abf2 4578
3f3791d3
AL
4579 /* atapi: mark qc as inactive to prevent the interrupt handler
4580 * from completing the command twice later, before the error handler
4581 * is called. (when rc != 0 and atapi request sense is needed)
4582 */
4583 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4584 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4585
1da177e4 4586 /* call completion callback */
77853bf2 4587 qc->complete_fn(qc);
1da177e4
LT
4588}
4589
39599a53
TH
4590static void fill_result_tf(struct ata_queued_cmd *qc)
4591{
4592 struct ata_port *ap = qc->ap;
4593
4594 ap->ops->tf_read(ap, &qc->result_tf);
4595 qc->result_tf.flags = qc->tf.flags;
4596}
4597
f686bcb8
TH
4598/**
4599 * ata_qc_complete - Complete an active ATA command
4600 * @qc: Command to complete
4601 * @err_mask: ATA Status register contents
4602 *
4603 * Indicate to the mid and upper layers that an ATA
4604 * command has completed, with either an ok or not-ok status.
4605 *
4606 * LOCKING:
cca3974e 4607 * spin_lock_irqsave(host lock)
f686bcb8
TH
4608 */
4609void ata_qc_complete(struct ata_queued_cmd *qc)
4610{
4611 struct ata_port *ap = qc->ap;
4612
4613 /* XXX: New EH and old EH use different mechanisms to
4614 * synchronize EH with regular execution path.
4615 *
4616 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4617 * Normal execution path is responsible for not accessing a
4618 * failed qc. libata core enforces the rule by returning NULL
4619 * from ata_qc_from_tag() for failed qcs.
4620 *
4621 * Old EH depends on ata_qc_complete() nullifying completion
4622 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4623 * not synchronize with interrupt handler. Only PIO task is
4624 * taken care of.
4625 */
4626 if (ap->ops->error_handler) {
b51e9e5d 4627 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4628
4629 if (unlikely(qc->err_mask))
4630 qc->flags |= ATA_QCFLAG_FAILED;
4631
4632 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4633 if (!ata_tag_internal(qc->tag)) {
4634 /* always fill result TF for failed qc */
39599a53 4635 fill_result_tf(qc);
f686bcb8
TH
4636 ata_qc_schedule_eh(qc);
4637 return;
4638 }
4639 }
4640
4641 /* read result TF if requested */
4642 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4643 fill_result_tf(qc);
f686bcb8
TH
4644
4645 __ata_qc_complete(qc);
4646 } else {
4647 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4648 return;
4649
4650 /* read result TF if failed or requested */
4651 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4652 fill_result_tf(qc);
f686bcb8
TH
4653
4654 __ata_qc_complete(qc);
4655 }
4656}
4657
dedaf2b0
TH
4658/**
4659 * ata_qc_complete_multiple - Complete multiple qcs successfully
4660 * @ap: port in question
4661 * @qc_active: new qc_active mask
4662 * @finish_qc: LLDD callback invoked before completing a qc
4663 *
4664 * Complete in-flight commands. This functions is meant to be
4665 * called from low-level driver's interrupt routine to complete
4666 * requests normally. ap->qc_active and @qc_active is compared
4667 * and commands are completed accordingly.
4668 *
4669 * LOCKING:
cca3974e 4670 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4671 *
4672 * RETURNS:
4673 * Number of completed commands on success, -errno otherwise.
4674 */
4675int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4676 void (*finish_qc)(struct ata_queued_cmd *))
4677{
4678 int nr_done = 0;
4679 u32 done_mask;
4680 int i;
4681
4682 done_mask = ap->qc_active ^ qc_active;
4683
4684 if (unlikely(done_mask & qc_active)) {
4685 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4686 "(%08x->%08x)\n", ap->qc_active, qc_active);
4687 return -EINVAL;
4688 }
4689
4690 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4691 struct ata_queued_cmd *qc;
4692
4693 if (!(done_mask & (1 << i)))
4694 continue;
4695
4696 if ((qc = ata_qc_from_tag(ap, i))) {
4697 if (finish_qc)
4698 finish_qc(qc);
4699 ata_qc_complete(qc);
4700 nr_done++;
4701 }
4702 }
4703
4704 return nr_done;
4705}
4706
1da177e4
LT
4707static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4708{
4709 struct ata_port *ap = qc->ap;
4710
4711 switch (qc->tf.protocol) {
3dc1d881 4712 case ATA_PROT_NCQ:
1da177e4
LT
4713 case ATA_PROT_DMA:
4714 case ATA_PROT_ATAPI_DMA:
4715 return 1;
4716
4717 case ATA_PROT_ATAPI:
4718 case ATA_PROT_PIO:
1da177e4
LT
4719 if (ap->flags & ATA_FLAG_PIO_DMA)
4720 return 1;
4721
4722 /* fall through */
4723
4724 default:
4725 return 0;
4726 }
4727
4728 /* never reached */
4729}
4730
4731/**
4732 * ata_qc_issue - issue taskfile to device
4733 * @qc: command to issue to device
4734 *
4735 * Prepare an ATA command to submission to device.
4736 * This includes mapping the data into a DMA-able
4737 * area, filling in the S/G table, and finally
4738 * writing the taskfile to hardware, starting the command.
4739 *
4740 * LOCKING:
cca3974e 4741 * spin_lock_irqsave(host lock)
1da177e4 4742 */
8e0e694a 4743void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4744{
4745 struct ata_port *ap = qc->ap;
4746
dedaf2b0
TH
4747 /* Make sure only one non-NCQ command is outstanding. The
4748 * check is skipped for old EH because it reuses active qc to
4749 * request ATAPI sense.
4750 */
4751 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4752
4753 if (qc->tf.protocol == ATA_PROT_NCQ) {
4754 WARN_ON(ap->sactive & (1 << qc->tag));
4755 ap->sactive |= 1 << qc->tag;
4756 } else {
4757 WARN_ON(ap->sactive);
4758 ap->active_tag = qc->tag;
4759 }
4760
e4a70e76 4761 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4762 ap->qc_active |= 1 << qc->tag;
e4a70e76 4763
1da177e4
LT
4764 if (ata_should_dma_map(qc)) {
4765 if (qc->flags & ATA_QCFLAG_SG) {
4766 if (ata_sg_setup(qc))
8e436af9 4767 goto sg_err;
1da177e4
LT
4768 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4769 if (ata_sg_setup_one(qc))
8e436af9 4770 goto sg_err;
1da177e4
LT
4771 }
4772 } else {
4773 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4774 }
4775
4776 ap->ops->qc_prep(qc);
4777
8e0e694a
TH
4778 qc->err_mask |= ap->ops->qc_issue(qc);
4779 if (unlikely(qc->err_mask))
4780 goto err;
4781 return;
1da177e4 4782
8e436af9
TH
4783sg_err:
4784 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4785 qc->err_mask |= AC_ERR_SYSTEM;
4786err:
4787 ata_qc_complete(qc);
1da177e4
LT
4788}
4789
4790/**
4791 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4792 * @qc: command to issue to device
4793 *
4794 * Using various libata functions and hooks, this function
4795 * starts an ATA command. ATA commands are grouped into
4796 * classes called "protocols", and issuing each type of protocol
4797 * is slightly different.
4798 *
0baab86b
EF
4799 * May be used as the qc_issue() entry in ata_port_operations.
4800 *
1da177e4 4801 * LOCKING:
cca3974e 4802 * spin_lock_irqsave(host lock)
1da177e4
LT
4803 *
4804 * RETURNS:
9a3d9eb0 4805 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4806 */
4807
9a3d9eb0 4808unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4809{
4810 struct ata_port *ap = qc->ap;
4811
e50362ec
AL
4812 /* Use polling pio if the LLD doesn't handle
4813 * interrupt driven pio and atapi CDB interrupt.
4814 */
4815 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4816 switch (qc->tf.protocol) {
4817 case ATA_PROT_PIO:
e3472cbe 4818 case ATA_PROT_NODATA:
e50362ec
AL
4819 case ATA_PROT_ATAPI:
4820 case ATA_PROT_ATAPI_NODATA:
4821 qc->tf.flags |= ATA_TFLAG_POLLING;
4822 break;
4823 case ATA_PROT_ATAPI_DMA:
4824 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4825 /* see ata_dma_blacklisted() */
e50362ec
AL
4826 BUG();
4827 break;
4828 default:
4829 break;
4830 }
4831 }
4832
3d3cca37
TH
4833 /* Some controllers show flaky interrupt behavior after
4834 * setting xfer mode. Use polling instead.
4835 */
4836 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4837 qc->tf.feature == SETFEATURES_XFER) &&
4838 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4839 qc->tf.flags |= ATA_TFLAG_POLLING;
4840
312f7da2 4841 /* select the device */
1da177e4
LT
4842 ata_dev_select(ap, qc->dev->devno, 1, 0);
4843
312f7da2 4844 /* start the command */
1da177e4
LT
4845 switch (qc->tf.protocol) {
4846 case ATA_PROT_NODATA:
312f7da2
AL
4847 if (qc->tf.flags & ATA_TFLAG_POLLING)
4848 ata_qc_set_polling(qc);
4849
e5338254 4850 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4851 ap->hsm_task_state = HSM_ST_LAST;
4852
4853 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4854 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4855
1da177e4
LT
4856 break;
4857
4858 case ATA_PROT_DMA:
587005de 4859 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4860
1da177e4
LT
4861 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4862 ap->ops->bmdma_setup(qc); /* set up bmdma */
4863 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4864 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4865 break;
4866
312f7da2
AL
4867 case ATA_PROT_PIO:
4868 if (qc->tf.flags & ATA_TFLAG_POLLING)
4869 ata_qc_set_polling(qc);
1da177e4 4870
e5338254 4871 ata_tf_to_host(ap, &qc->tf);
312f7da2 4872
54f00389
AL
4873 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4874 /* PIO data out protocol */
4875 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4876 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4877
4878 /* always send first data block using
e27486db 4879 * the ata_pio_task() codepath.
54f00389 4880 */
312f7da2 4881 } else {
54f00389
AL
4882 /* PIO data in protocol */
4883 ap->hsm_task_state = HSM_ST;
4884
4885 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4886 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4887
4888 /* if polling, ata_pio_task() handles the rest.
4889 * otherwise, interrupt handler takes over from here.
4890 */
312f7da2
AL
4891 }
4892
1da177e4
LT
4893 break;
4894
1da177e4 4895 case ATA_PROT_ATAPI:
1da177e4 4896 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4897 if (qc->tf.flags & ATA_TFLAG_POLLING)
4898 ata_qc_set_polling(qc);
4899
e5338254 4900 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4901
312f7da2
AL
4902 ap->hsm_task_state = HSM_ST_FIRST;
4903
4904 /* send cdb by polling if no cdb interrupt */
4905 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4906 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4907 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4908 break;
4909
4910 case ATA_PROT_ATAPI_DMA:
587005de 4911 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4912
1da177e4
LT
4913 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4914 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4915 ap->hsm_task_state = HSM_ST_FIRST;
4916
4917 /* send cdb by polling if no cdb interrupt */
4918 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4919 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4920 break;
4921
4922 default:
4923 WARN_ON(1);
9a3d9eb0 4924 return AC_ERR_SYSTEM;
1da177e4
LT
4925 }
4926
4927 return 0;
4928}
4929
1da177e4
LT
4930/**
4931 * ata_host_intr - Handle host interrupt for given (port, task)
4932 * @ap: Port on which interrupt arrived (possibly...)
4933 * @qc: Taskfile currently active in engine
4934 *
4935 * Handle host interrupt for given queued command. Currently,
4936 * only DMA interrupts are handled. All other commands are
4937 * handled via polling with interrupts disabled (nIEN bit).
4938 *
4939 * LOCKING:
cca3974e 4940 * spin_lock_irqsave(host lock)
1da177e4
LT
4941 *
4942 * RETURNS:
4943 * One if interrupt was handled, zero if not (shared irq).
4944 */
4945
4946inline unsigned int ata_host_intr (struct ata_port *ap,
4947 struct ata_queued_cmd *qc)
4948{
ea54763f 4949 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 4950 u8 status, host_stat = 0;
1da177e4 4951
312f7da2
AL
4952 VPRINTK("ata%u: protocol %d task_state %d\n",
4953 ap->id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 4954
312f7da2
AL
4955 /* Check whether we are expecting interrupt in this state */
4956 switch (ap->hsm_task_state) {
4957 case HSM_ST_FIRST:
6912ccd5
AL
4958 /* Some pre-ATAPI-4 devices assert INTRQ
4959 * at this state when ready to receive CDB.
4960 */
1da177e4 4961
312f7da2
AL
4962 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4963 * The flag was turned on only for atapi devices.
4964 * No need to check is_atapi_taskfile(&qc->tf) again.
4965 */
4966 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 4967 goto idle_irq;
1da177e4 4968 break;
312f7da2
AL
4969 case HSM_ST_LAST:
4970 if (qc->tf.protocol == ATA_PROT_DMA ||
4971 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4972 /* check status of DMA engine */
4973 host_stat = ap->ops->bmdma_status(ap);
4974 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4975
4976 /* if it's not our irq... */
4977 if (!(host_stat & ATA_DMA_INTR))
4978 goto idle_irq;
4979
4980 /* before we do anything else, clear DMA-Start bit */
4981 ap->ops->bmdma_stop(qc);
a4f16610
AL
4982
4983 if (unlikely(host_stat & ATA_DMA_ERR)) {
4984 /* error when transfering data to/from memory */
4985 qc->err_mask |= AC_ERR_HOST_BUS;
4986 ap->hsm_task_state = HSM_ST_ERR;
4987 }
312f7da2
AL
4988 }
4989 break;
4990 case HSM_ST:
4991 break;
1da177e4
LT
4992 default:
4993 goto idle_irq;
4994 }
4995
312f7da2
AL
4996 /* check altstatus */
4997 status = ata_altstatus(ap);
4998 if (status & ATA_BUSY)
4999 goto idle_irq;
1da177e4 5000
312f7da2
AL
5001 /* check main status, clearing INTRQ */
5002 status = ata_chk_status(ap);
5003 if (unlikely(status & ATA_BUSY))
5004 goto idle_irq;
1da177e4 5005
312f7da2
AL
5006 /* ack bmdma irq events */
5007 ap->ops->irq_clear(ap);
1da177e4 5008
bb5cb290 5009 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5010
5011 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5012 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5013 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5014
1da177e4
LT
5015 return 1; /* irq handled */
5016
5017idle_irq:
5018 ap->stats.idle_irq++;
5019
5020#ifdef ATA_IRQ_TRAP
5021 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5022 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5023 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5024 return 1;
1da177e4
LT
5025 }
5026#endif
5027 return 0; /* irq not handled */
5028}
5029
5030/**
5031 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5032 * @irq: irq line (unused)
cca3974e 5033 * @dev_instance: pointer to our ata_host information structure
1da177e4 5034 *
0cba632b
JG
5035 * Default interrupt handler for PCI IDE devices. Calls
5036 * ata_host_intr() for each port that is not disabled.
5037 *
1da177e4 5038 * LOCKING:
cca3974e 5039 * Obtains host lock during operation.
1da177e4
LT
5040 *
5041 * RETURNS:
0cba632b 5042 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5043 */
5044
7d12e780 5045irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5046{
cca3974e 5047 struct ata_host *host = dev_instance;
1da177e4
LT
5048 unsigned int i;
5049 unsigned int handled = 0;
5050 unsigned long flags;
5051
5052 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5053 spin_lock_irqsave(&host->lock, flags);
1da177e4 5054
cca3974e 5055 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5056 struct ata_port *ap;
5057
cca3974e 5058 ap = host->ports[i];
c1389503 5059 if (ap &&
029f5468 5060 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5061 struct ata_queued_cmd *qc;
5062
5063 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5064 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5065 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5066 handled |= ata_host_intr(ap, qc);
5067 }
5068 }
5069
cca3974e 5070 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5071
5072 return IRQ_RETVAL(handled);
5073}
5074
34bf2170
TH
5075/**
5076 * sata_scr_valid - test whether SCRs are accessible
5077 * @ap: ATA port to test SCR accessibility for
5078 *
5079 * Test whether SCRs are accessible for @ap.
5080 *
5081 * LOCKING:
5082 * None.
5083 *
5084 * RETURNS:
5085 * 1 if SCRs are accessible, 0 otherwise.
5086 */
5087int sata_scr_valid(struct ata_port *ap)
5088{
5089 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5090}
5091
5092/**
5093 * sata_scr_read - read SCR register of the specified port
5094 * @ap: ATA port to read SCR for
5095 * @reg: SCR to read
5096 * @val: Place to store read value
5097 *
5098 * Read SCR register @reg of @ap into *@val. This function is
5099 * guaranteed to succeed if the cable type of the port is SATA
5100 * and the port implements ->scr_read.
5101 *
5102 * LOCKING:
5103 * None.
5104 *
5105 * RETURNS:
5106 * 0 on success, negative errno on failure.
5107 */
5108int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5109{
5110 if (sata_scr_valid(ap)) {
5111 *val = ap->ops->scr_read(ap, reg);
5112 return 0;
5113 }
5114 return -EOPNOTSUPP;
5115}
5116
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	NOTE(review): sata_scr_valid() only checks ->scr_read, not
 *	->scr_write; presumably every LLD that provides one provides
 *	both -- confirm before relying on the guarantee above.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}
5141
5142/**
5143 * sata_scr_write_flush - write SCR register of the specified port and flush
5144 * @ap: ATA port to write SCR for
5145 * @reg: SCR to write
5146 * @val: value to write
5147 *
5148 * This function is identical to sata_scr_write() except that this
5149 * function performs flush after writing to the register.
5150 *
5151 * LOCKING:
5152 * None.
5153 *
5154 * RETURNS:
5155 * 0 on success, negative errno on failure.
5156 */
5157int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5158{
5159 if (sata_scr_valid(ap)) {
5160 ap->ops->scr_write(ap, reg, val);
5161 ap->ops->scr_read(ap, reg);
5162 return 0;
5163 }
5164 return -EOPNOTSUPP;
5165}
5166
5167/**
5168 * ata_port_online - test whether the given port is online
5169 * @ap: ATA port to test
5170 *
5171 * Test whether @ap is online. Note that this function returns 0
5172 * if online status of @ap cannot be obtained, so
5173 * ata_port_online(ap) != !ata_port_offline(ap).
5174 *
5175 * LOCKING:
5176 * None.
5177 *
5178 * RETURNS:
5179 * 1 if the port online status is available and online.
5180 */
5181int ata_port_online(struct ata_port *ap)
5182{
5183 u32 sstatus;
5184
5185 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5186 return 1;
5187 return 0;
5188}
5189
5190/**
5191 * ata_port_offline - test whether the given port is offline
5192 * @ap: ATA port to test
5193 *
5194 * Test whether @ap is offline. Note that this function returns
5195 * 0 if offline status of @ap cannot be obtained, so
5196 * ata_port_online(ap) != !ata_port_offline(ap).
5197 *
5198 * LOCKING:
5199 * None.
5200 *
5201 * RETURNS:
5202 * 1 if the port offline status is available and offline.
5203 */
5204int ata_port_offline(struct ata_port *ap)
5205{
5206 u32 sstatus;
5207
5208 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5209 return 1;
5210 return 0;
5211}
0baab86b 5212
77b08fb5 5213int ata_flush_cache(struct ata_device *dev)
9b847548 5214{
977e6b9f 5215 unsigned int err_mask;
9b847548
JA
5216 u8 cmd;
5217
5218 if (!ata_try_flush_cache(dev))
5219 return 0;
5220
6fc49adb 5221 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5222 cmd = ATA_CMD_FLUSH_EXT;
5223 else
5224 cmd = ATA_CMD_FLUSH;
5225
977e6b9f
TH
5226 err_mask = ata_do_simple_cmd(dev, cmd);
5227 if (err_mask) {
5228 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5229 return -EIO;
5230 }
5231
5232 return 0;
9b847548
JA
5233}
5234
/* Ask EH on every port of @host to perform the PM operation described by
 * @mesg/@action/@ehi_flags.  When @wait is set, blocks until each port's
 * EH finishes and returns the first non-zero per-port result.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH writes its result through this pointer to our
			 * stack variable; only valid because we wait below.
			 */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;	/* first failure aborts */
		}
	}

	return 0;
}
5281
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	/* synchronous (wait=1), quiet suspend request to every port's EH */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	/* undo any partial suspension before reporting failure */
	ata_host_resume(host);
	return rc;
}
5332
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* asynchronous (wait=0): kick EH on each port and return;
	 * softreset revalidates the devices after power-up
	 */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
5350
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on allocation failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed: freed automatically when the device goes away */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
5381
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Clear everything past ATA_DEVICE_CLEAR_OFFSET; the fields
	 * before the offset (e.g. ap, devno) survive re-init.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* start with unrestricted transfer masks; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5413
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* disabled until probing enables it */
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	/* ent->pinfo2, when present, overrides the settings for port 1 */
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;	/* narrowed later from SControl */
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* port_task's handler is filled in later per use */
	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* dev->ap/devno must be set before ata_dev_init() uses them */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5489
155a8a9c 5490/**
4608c160
TH
5491 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5492 * @ap: ATA port to initialize SCSI host for
5493 * @shost: SCSI host associated with @ap
155a8a9c 5494 *
4608c160 5495 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5496 *
5497 * LOCKING:
5498 * Inherited from caller.
5499 */
4608c160 5500static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5501{
cca3974e 5502 ap->scsi_host = shost;
155a8a9c 5503
4608c160
TH
5504 shost->unique_id = ap->id;
5505 shost->max_id = 16;
5506 shost->max_lun = 1;
5507 shost->max_channel = 1;
5508 shost->max_cmd_len = 12;
155a8a9c
BK
5509}
5510
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, for NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	/* refuse ports with no way to reset: neither new-style EH nor
	 * an old-style SATA/software reset flag
	 */
	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	/* the ata_port lives in the Scsi_Host's hostdata area */
	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
5554
f0d36efd
TH
5555static void ata_host_release(struct device *gendev, void *res)
5556{
5557 struct ata_host *host = dev_get_drvdata(gendev);
5558 int i;
5559
5560 for (i = 0; i < host->n_ports; i++) {
5561 struct ata_port *ap = host->ports[i];
5562
5563 if (!ap)
5564 continue;
5565
5566 if (ap->ops->port_stop)
5567 ap->ops->port_stop(ap);
5568
5569 scsi_host_put(ap->scsi_host);
5570 }
5571
5572 if (host->ops->host_stop)
5573 host->ops->host_stop(host);
5574}
5575
b03732f0 5576/**
cca3974e
JG
5577 * ata_sas_host_init - Initialize a host struct
5578 * @host: host to initialize
5579 * @dev: device host is attached to
5580 * @flags: host flags
5581 * @ops: port_ops
b03732f0
BK
5582 *
5583 * LOCKING:
5584 * PCI/etc. bus probe sem.
5585 *
5586 */
5587
cca3974e
JG
5588void ata_host_init(struct ata_host *host, struct device *dev,
5589 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5590{
cca3974e
JG
5591 spin_lock_init(&host->lock);
5592 host->dev = dev;
5593 host->flags = flags;
5594 host->ops = ops;
b03732f0
BK
5595}
5596
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	/* NOTE(review): rc can reach err_out uninitialized when the early
	 * host allocation or ata_port_add() fails; it is only used in the
	 * VPRINTK debug message there, but worth initializing.
	 */
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* open a devres group so partial acquisitions can be rolled back */
	if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
		return 0;

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
			    (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		goto err_out;
	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->iomap = ent->iomap;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			/* drop the failed port so release won't touch it */
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
				"ctl 0x%p bmdma 0x%p irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
			      ent->irq_flags, DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = devm_request_irq(dev, ent->irq2,
				      ent->port_ops->irq_handler, ent->irq_flags,
				      DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out;
		}
	}

	/* resource acquisition complete */
	devres_remove_group(dev, ata_device_add);

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;	/* shadows outer rc intentionally for this scope */

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style drivers: synchronous bus probe */
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

 err_out:
	/* releasing the devres group frees everything acquired above */
	devres_release_group(dev, ata_device_add);
	dev_set_drvdata(dev, NULL);
	VPRINTK("EXIT, returning %d\n", rc);
	return 0;
}
5800
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style drivers have no EH to quiesce; just drop the host */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task(): flush, cancel, flush again in case
	 * the work requeued itself.
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5858
0529c159
TH
5859/**
5860 * ata_host_detach - Detach all ports of an ATA host
5861 * @host: Host to detach
5862 *
5863 * Detach all ports of @host.
5864 *
5865 * LOCKING:
5866 * Kernel thread context (may sleep).
5867 */
5868void ata_host_detach(struct ata_host *host)
5869{
5870 int i;
5871
5872 for (i = 0; i < host->n_ports; i++)
5873 ata_port_detach(host->ports[i]);
5874}
5875
/* Allocate and initialize an ata_probe_ent from @port's template,
 * bound to @dev.  Uses devres-managed allocation when @dev already has
 * devres entries, plain kzalloc otherwise.  Returns NULL on OOM.
 */
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	/* XXX - the following if can go away once all LLDs are managed */
	if (!list_empty(&dev->devres_head))
		probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	else
		probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	/* copy the per-port template into the probe entry */
	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
5905
1da177e4
LT
5906/**
5907 * ata_std_ports - initialize ioaddr with standard port offsets.
5908 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5909 *
5910 * Utility function which initializes data_addr, error_addr,
5911 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5912 * device_addr, status_addr, and command_addr to standard offsets
5913 * relative to cmd_addr.
5914 *
5915 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5916 */
0baab86b 5917
1da177e4
LT
5918void ata_std_ports(struct ata_ioports *ioaddr)
5919{
5920 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5921 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5922 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5923 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5924 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5925 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5926 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5927 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5928 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5929 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5930}
5931
0baab86b 5932
374b1873
JG
5933#ifdef CONFIG_PCI
5934
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host;

	host = dev_get_drvdata(pci_dev_to_dev(pdev));
	ata_host_detach(host);
}
5953
5954/* move to PCI subsystem */
057ace5e 5955int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5956{
5957 unsigned long tmp = 0;
5958
5959 switch (bits->width) {
5960 case 1: {
5961 u8 tmp8 = 0;
5962 pci_read_config_byte(pdev, bits->reg, &tmp8);
5963 tmp = tmp8;
5964 break;
5965 }
5966 case 2: {
5967 u16 tmp16 = 0;
5968 pci_read_config_word(pdev, bits->reg, &tmp16);
5969 tmp = tmp16;
5970 break;
5971 }
5972 case 4: {
5973 u32 tmp32 = 0;
5974 pci_read_config_dword(pdev, bits->reg, &tmp32);
5975 tmp = tmp32;
5976 break;
5977 }
5978
5979 default:
5980 return -EINVAL;
5981 }
5982
5983 tmp &= bits->mask;
5984
5985 return (tmp == bits->val) ? 1 : 0;
5986}
9b847548 5987
/* PCI-level half of suspend: save config space and, for a full
 * suspend (as opposed to e.g. freeze), power the device down to D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
5997
/* PCI-level half of resume: restore power state and config space, then
 * re-enable the device.  Returns 0 on success or the pcim_enable_device()
 * error code; callers must propagate this instead of assuming success.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* devres-managed enable; can fail, e.g. if resources are gone */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6015
3c5100c1 6016int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6017{
cca3974e 6018 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6019 int rc = 0;
6020
cca3974e 6021 rc = ata_host_suspend(host, mesg);
500530f6
TH
6022 if (rc)
6023 return rc;
6024
3c5100c1 6025 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6026
6027 return 0;
6028}
6029
/* Resume an ATA PCI device: bring the PCI function back up, and only
 * on success kick host-level (EH driven) resume.  The do_resume error
 * code is propagated to the PCI PM core.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
1da177e4
LT
6040#endif /* CONFIG_PCI */
6041
6042
1da177e4
LT
6043static int __init ata_init(void)
6044{
a8601e5f 6045 ata_probe_timeout *= HZ;
1da177e4
LT
6046 ata_wq = create_workqueue("ata");
6047 if (!ata_wq)
6048 return -ENOMEM;
6049
453b07ac
TH
6050 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6051 if (!ata_aux_wq) {
6052 destroy_workqueue(ata_wq);
6053 return -ENOMEM;
6054 }
6055
1da177e4
LT
6056 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6057 return 0;
6058}
6059
/* Module exit: tear down the workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6065
a4625085 6066subsys_initcall(ata_init);
1da177e4
LT
6067module_exit(ata_exit);
6068
67846b30 6069static unsigned long ratelimit_time;
34af946a 6070static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6071
6072int ata_ratelimit(void)
6073{
6074 int rc;
6075 unsigned long flags;
6076
6077 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6078
6079 if (time_after(jiffies, ratelimit_time)) {
6080 rc = 1;
6081 ratelimit_time = jiffies + (HZ/5);
6082 } else
6083 rc = 0;
6084
6085 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6086
6087 return rc;
6088}
6089
c22daff4
TH
6090/**
6091 * ata_wait_register - wait until register value changes
6092 * @reg: IO-mapped register
6093 * @mask: Mask to apply to read register value
6094 * @val: Wait condition
6095 * @interval_msec: polling interval in milliseconds
6096 * @timeout_msec: timeout in milliseconds
6097 *
6098 * Waiting for some bits of register to change is a common
6099 * operation for ATA controllers. This function reads 32bit LE
6100 * IO-mapped register @reg and tests for the following condition.
6101 *
6102 * (*@reg & mask) != val
6103 *
6104 * If the condition is met, it returns; otherwise, the process is
6105 * repeated after @interval_msec until timeout.
6106 *
6107 * LOCKING:
6108 * Kernel thread context (may sleep)
6109 *
6110 * RETURNS:
6111 * The final register value.
6112 */
6113u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6114 unsigned long interval_msec,
6115 unsigned long timeout_msec)
6116{
6117 unsigned long timeout;
6118 u32 tmp;
6119
6120 tmp = ioread32(reg);
6121
6122 /* Calculate timeout _after_ the first read to make sure
6123 * preceding writes reach the controller before starting to
6124 * eat away the timeout.
6125 */
6126 timeout = jiffies + (timeout_msec * HZ) / 1000;
6127
6128 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6129 msleep(interval_msec);
6130 tmp = ioread32(reg);
6131 }
6132
6133 return tmp;
6134}
6135
dd5b06c4
TH
6136/*
6137 * Dummy port_ops
6138 */
6139static void ata_dummy_noret(struct ata_port *ap) { }
6140static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6141static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6142
6143static u8 ata_dummy_check_status(struct ata_port *ap)
6144{
6145 return ATA_DRDY;
6146}
6147
6148static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6149{
6150 return AC_ERR_SYSTEM;
6151}
6152
/*
 * Port operations for a dummy (unused/detached) port: status reads
 * always report "ready", every command issue fails with
 * AC_ERR_SYSTEM, and all remaining hooks are harmless no-ops.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
6168
1da177e4
LT
6169/*
6170 * libata is essentially a library of internal helper functions for
6171 * low-level ATA host controller drivers. As such, the API/ABI is
6172 * likely to change as new drivers are added and updated.
6173 * Do not depend on ABI/API stability.
6174 */
6175
e9c83914
TH
6176EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6177EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6178EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6179EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6180EXPORT_SYMBOL_GPL(ata_std_bios_param);
6181EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6182EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6183EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6184EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6185EXPORT_SYMBOL_GPL(ata_sg_init);
6186EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6187EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6188EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6189EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6190EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6191EXPORT_SYMBOL_GPL(ata_tf_load);
6192EXPORT_SYMBOL_GPL(ata_tf_read);
6193EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6194EXPORT_SYMBOL_GPL(ata_std_dev_select);
6195EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6196EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6197EXPORT_SYMBOL_GPL(ata_check_status);
6198EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6199EXPORT_SYMBOL_GPL(ata_exec_command);
6200EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6201EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6202EXPORT_SYMBOL_GPL(ata_data_xfer);
6203EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6204EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6205EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6206EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6207EXPORT_SYMBOL_GPL(ata_bmdma_start);
6208EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6209EXPORT_SYMBOL_GPL(ata_bmdma_status);
6210EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6211EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6212EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6213EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6214EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6215EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6216EXPORT_SYMBOL_GPL(ata_port_probe);
3c567b7d 6217EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6218EXPORT_SYMBOL_GPL(sata_phy_debounce);
6219EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6220EXPORT_SYMBOL_GPL(sata_phy_reset);
6221EXPORT_SYMBOL_GPL(__sata_phy_reset);
6222EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6223EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6224EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6225EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6226EXPORT_SYMBOL_GPL(sata_std_hardreset);
6227EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6228EXPORT_SYMBOL_GPL(ata_dev_classify);
6229EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6230EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6231EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6232EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6233EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6234EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6235EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6236EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6237EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6238EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6239EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6240EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6241EXPORT_SYMBOL_GPL(sata_scr_valid);
6242EXPORT_SYMBOL_GPL(sata_scr_read);
6243EXPORT_SYMBOL_GPL(sata_scr_write);
6244EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6245EXPORT_SYMBOL_GPL(ata_port_online);
6246EXPORT_SYMBOL_GPL(ata_port_offline);
cca3974e
JG
6247EXPORT_SYMBOL_GPL(ata_host_suspend);
6248EXPORT_SYMBOL_GPL(ata_host_resume);
6a62a04d
TH
6249EXPORT_SYMBOL_GPL(ata_id_string);
6250EXPORT_SYMBOL_GPL(ata_id_c_string);
6919a0a6 6251EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6252EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6253
1bc4ccff 6254EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6255EXPORT_SYMBOL_GPL(ata_timing_compute);
6256EXPORT_SYMBOL_GPL(ata_timing_merge);
6257
1da177e4
LT
6258#ifdef CONFIG_PCI
6259EXPORT_SYMBOL_GPL(pci_test_config_bits);
6260EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6261EXPORT_SYMBOL_GPL(ata_pci_init_one);
6262EXPORT_SYMBOL_GPL(ata_pci_remove_one);
500530f6
TH
6263EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6264EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6265EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6266EXPORT_SYMBOL_GPL(ata_pci_device_resume);
67951ade
AC
6267EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6268EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6269#endif /* CONFIG_PCI */
9b847548 6270
9b847548
JA
6271EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6272EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
ece1d636 6273
ece1d636 6274EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6275EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6276EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6277EXPORT_SYMBOL_GPL(ata_port_freeze);
6278EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6279EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6280EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6281EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6282EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6283EXPORT_SYMBOL_GPL(ata_irq_on);
6284EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6285EXPORT_SYMBOL_GPL(ata_irq_ack);
6286EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
This page took 0.837034 seconds and 5 git commands to generate.