libata: Limit max sector to 128 for TORiSAN DVD drives (take 3)
drivers/ata/libata-core.c

/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.20"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

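/*
 * Illustrative sketch (not part of the original file): round-tripping a
 * taskfile through the two converters above.  The command value and the
 * example_* name are hypothetical.
 */
static void example_tf_fis_roundtrip(void)
{
	struct ata_taskfile tf, decoded;
	u8 fis[20];

	memset(&tf, 0, sizeof(tf));
	tf.command = ATA_CMD_ID_ATA;

	ata_tf_to_fis(&tf, fis, 0);	/* pmp 0: no port multiplier */
	/* fis[0] == 0x27, fis[1] == 0x80 (Command FIS), fis[2] == command */
	ata_tf_from_fis(fis, &decoded);	/* decoded.command == tf.command */
}
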
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

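/*
 * Illustrative sketch (not part of the original file): how the
 * ata_rw_cmds[] index above is formed.  A DMA write with LBA48 and FUA
 * selects index 16 + 4 + 2 + 1 = 23, which is ATA_CMD_WRITE_FUA_EXT.
 * The example_* name is hypothetical.
 */
static u8 example_rw_cmd_dma_write_fua_ext(void)
{
	int fua = 4, lba48 = 2, write = 1;	/* flag-derived offsets */

	return ata_rw_cmds[16 + fua + lba48 + write];
}
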
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

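/*
 * Worked example (not part of the original file): for a CHS device with
 * dev->heads == 16 and dev->sectors == 63, the address cyl 100 / head 5 /
 * sect 1 decodes above to (100 * 16 + 5) * 63 + 1 == 101116.  The
 * geometry values are hypothetical.
 */
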
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

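/*
 * Illustrative sketch (not part of the original file): building a
 * 16-sector read at LBA 0, assuming an LBA-capable device.  Passing
 * ATA_TAG_INTERNAL forces the non-NCQ path.  The example_* name is
 * hypothetical; real callers live in libata-scsi.c.
 */
static int example_build_read_tf(struct ata_device *dev,
				 struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);
	/* no ATA_TFLAG_WRITE in tf_flags: this is a read */
	return ata_build_rw_tf(tf, dev, 0, 16, 0, ATA_TAG_INTERNAL);
}
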
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

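/*
 * Illustrative sketch (not part of the original file): packing PIO
 * modes 0-4 (0x1f), MWDMA 0-2 (0x07) and UDMA 0-5 (0x3f) into a single
 * xfer_mask.  The mode sets chosen here are hypothetical.
 */
static unsigned int example_xfermask(void)
{
	return ata_pack_xfermask(0x1f, 0x07, 0x3f);
}
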
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

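/*
 * Illustrative sketch (not part of the original file): with the mask
 * from example_xfermask() above, the highest set bit falls in the UDMA
 * range, so ata_xfer_mask2mode() returns XFER_UDMA_5 and
 * ata_xfer_mode2shift() maps that back to ATA_SHIFT_UDMA.
 */
static void example_xfer_lookups(void)
{
	u8 mode = ata_xfer_mask2mode(example_xfermask());	/* XFER_UDMA_5 */
	int shift = ata_xfer_mode2shift(mode);			/* ATA_SHIFT_UDMA */

	(void)shift;	/* illustrative only */
}
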
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

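/*
 * Illustrative sketch (not part of the original file): feeding one of
 * the ATAPI signature pairs checked above into ata_dev_classify().
 * The example_* name is hypothetical.
 */
static unsigned int example_classify_atapi(void)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));
	tf.lbam = 0x14;
	tf.lbah = 0xeb;			/* ATAPI signature */

	return ata_dev_classify(&tf);	/* ATA_DEV_ATAPI */
}
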
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

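/*
 * Illustrative sketch (not part of the original file): extracting the
 * model string from IDENTIFY data, the way ata_dev_configure() below
 * does.  The id buffer is assumed to hold valid IDENTIFY data and s
 * must have room for ATA_ID_PROD_LEN + 1 bytes.
 */
static void example_read_model(const u16 *id, unsigned char *s)
{
	ata_id_c_string(id, s, ATA_ID_PROD, ATA_ID_PROD_LEN + 1);
}
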
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 *	ata_id_to_dma_mode - Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode.  This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);		/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes, that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

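/*
 * Illustrative sketch (not part of the original file): a drive
 * reporting valid words 64 and 88 ends up with PIO0-4, MWDMA0-2 and
 * UDMA0-5 in the packed mask.  The raw word values below are
 * hypothetical.
 */
static unsigned int example_id_xfermask(void)
{
	u16 id[ATA_ID_WORDS] = { 0 };

	id[ATA_ID_FIELD_VALID] = (1 << 1) | (1 << 2);	/* words 64, 88 valid */
	id[ATA_ID_PIO_MODES] = 0x03;			/* PIO3 and PIO4 */
	id[ATA_ID_MWDMA_MODES] = 0x07;			/* MWDMA0-2 */
	id[ATA_ID_UDMA_MODES] = 0x3f;			/* UDMA0-5 */

	return ata_id_xfermask(id);
}
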
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

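/*
 * Illustrative sketch (not part of the original file): issuing a PIO
 * data-in IDENTIFY DEVICE through ata_exec_internal(), roughly what
 * ata_dev_read_id() below does.  The id buffer is caller-provided and
 * the example_* name is hypothetical.
 */
static unsigned int example_identify(struct ata_device *dev, u16 *id)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	return ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS);
}
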
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, one that consists only of the
 *	opcode 'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}

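/*
 * Worked example (not part of the original file): with pio_mode ==
 * XFER_PIO_2, speed == 2, so the drive-specific word is consulted.  If
 * ATA_ID_EIDE_PIO reports a 383ns minimum non-IORDY cycle time, then
 * 383 > 240 and ata_pio_need_iordy() returns 1.  The 383ns figure is
 * hypothetical.
 */
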
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			       rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min(ATA_MAX_SECTORS_128, dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}

1805/**
1806 * ata_bus_probe - Reset and probe ATA bus
1807 * @ap: Bus to probe
1808 *
0cba632b
JG
1809 * Master ATA bus probing function. Initiates a hardware-dependent
1810 * bus reset, then attempts to identify any devices found on
1811 * the bus.
1812 *
1da177e4 1813 * LOCKING:
0cba632b 1814 * PCI/etc. bus probe sem.
1da177e4
LT
1815 *
1816 * RETURNS:
96072e69 1817 * Zero on success, negative errno otherwise.
1da177e4
LT
1818 */
1819
80289167 1820int ata_bus_probe(struct ata_port *ap)
1da177e4 1821{
28ca5c57 1822 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 1823 int tries[ATA_MAX_DEVICES];
4ae72a1e 1824 int i, rc;
e82cbdb9 1825 struct ata_device *dev;
1da177e4 1826
28ca5c57 1827 ata_port_probe(ap);
c19ba8af 1828
14d2bac1
TH
1829 for (i = 0; i < ATA_MAX_DEVICES; i++)
1830 tries[i] = ATA_PROBE_MAX_TRIES;
1831
1832 retry:
2044470c 1833 /* reset and determine device classes */
52783c5d 1834 ap->ops->phy_reset(ap);
2061a47a 1835
52783c5d
TH
1836 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1837 dev = &ap->device[i];
c19ba8af 1838
52783c5d
TH
1839 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1840 dev->class != ATA_DEV_UNKNOWN)
1841 classes[dev->devno] = dev->class;
1842 else
1843 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1844
52783c5d 1845 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1846 }
1da177e4 1847
52783c5d 1848 ata_port_probe(ap);
2044470c 1849
b6079ca4
AC
1850 /* after the reset the device state is PIO 0 and the controller
1851 state is undefined. Record the mode */
1852
1853 for (i = 0; i < ATA_MAX_DEVICES; i++)
1854 ap->device[i].pio_mode = XFER_PIO_0;
1855
f31f0cc2
JG
1856 /* read IDENTIFY page and configure devices. We have to do the identify
1857 specific sequence bass-ackwards so that PDIAG- is released by
1858 the slave device */
1859
1860 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
e82cbdb9 1861 dev = &ap->device[i];
28ca5c57 1862
ec573755
TH
1863 if (tries[i])
1864 dev->class = classes[i];
ffeae418 1865
14d2bac1 1866 if (!ata_dev_enabled(dev))
ffeae418 1867 continue;
ffeae418 1868
bff04647
TH
1869 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1870 dev->id);
14d2bac1
TH
1871 if (rc)
1872 goto fail;
f31f0cc2
JG
1873 }
1874
1875 /* After the identify sequence we can now set up the devices. We do
1876 this in the normal order so that the user doesn't get confused */
1877
1878 for(i = 0; i < ATA_MAX_DEVICES; i++) {
1879 dev = &ap->device[i];
1880 if (!ata_dev_enabled(dev))
1881 continue;
14d2bac1 1882
efdaedc4
TH
1883 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1884 rc = ata_dev_configure(dev);
1885 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
1886 if (rc)
1887 goto fail;
1da177e4
LT
1888 }
1889
e82cbdb9 1890 /* configure transfer mode */
3adcebb2 1891 rc = ata_set_mode(ap, &dev);
4ae72a1e 1892 if (rc)
51713d35 1893 goto fail;
1da177e4 1894
e82cbdb9
TH
1895 for (i = 0; i < ATA_MAX_DEVICES; i++)
1896 if (ata_dev_enabled(&ap->device[i]))
1897 return 0;
1da177e4 1898
e82cbdb9
TH
1899 /* no device present, disable port */
1900 ata_port_disable(ap);
1da177e4 1901 ap->ops->port_disable(ap);
96072e69 1902 return -ENODEV;
14d2bac1
TH
1903
1904 fail:
4ae72a1e
TH
1905 tries[dev->devno]--;
1906
14d2bac1
TH
1907 switch (rc) {
1908 case -EINVAL:
4ae72a1e 1909 /* eeek, something went very wrong, give up */
14d2bac1
TH
1910 tries[dev->devno] = 0;
1911 break;
4ae72a1e
TH
1912
1913 case -ENODEV:
1914 /* give it just one more chance */
1915 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 1916 case -EIO:
4ae72a1e
TH
1917 if (tries[dev->devno] == 1) {
1918 /* This is the last chance, better to slow
1919 * down than lose it.
1920 */
1921 sata_down_spd_limit(ap);
1922 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
1923 }
14d2bac1
TH
1924 }
1925
4ae72a1e 1926 if (!tries[dev->devno])
3373efd8 1927 ata_dev_disable(dev);
ec573755 1928
14d2bac1 1929 goto retry;
1da177e4
LT
1930}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
1947
3be680b7
TH
1948/**
1949 * sata_print_link_status - Print SATA link status
1950 * @ap: SATA port to printk link status about
1951 *
1952 * This function prints link speed and status of a SATA link.
1953 *
1954 * LOCKING:
1955 * None.
1956 */
1957static void sata_print_link_status(struct ata_port *ap)
1958{
6d5f9732 1959 u32 sstatus, scontrol, tmp;
3be680b7 1960
81952c54 1961 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1962 return;
81952c54 1963 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1964
81952c54 1965 if (ata_port_online(ap)) {
3be680b7 1966 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1967 ata_port_printk(ap, KERN_INFO,
1968 "SATA link up %s (SStatus %X SControl %X)\n",
1969 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1970 } else {
f15a1daf
TH
1971 ata_port_printk(ap, KERN_INFO,
1972 "SATA link down (SStatus %X SControl %X)\n",
1973 sstatus, scontrol);
3be680b7
TH
1974 }
1975}
1976
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
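
/*
 * For reference, the SControl values written above decode as follows
 * (DET is bits 3:0, SPD bits 7:4, IPM bits 11:8 per the SATA spec):
 *
 *	0x301 - DET = 1 (perform interface initialization, i.e. COMRESET),
 *	        SPD = 0 (no speed restriction), IPM = 3 (disallow the
 *	        partial and slumber power states)
 *	0x300 - DET = 0 (release reset, resume normal operation), IPM = 3
 *
 * so the sequence asserts COMRESET, holds it briefly, then lets the
 * phy renegotiate the link.
 */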

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if
 *	none is present.
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
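
/*
 * A minimal usage sketch (hypothetical LLDD code, not part of this
 * file): a PATA driver programming shared cable timings could do
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 *	if (pair && pair->pio_mode < adev->pio_mode)
 *		slow the cable timings down to the pair's mode;
 *
 * since master and slave share the same physical bus.  The CFA
 * shared-cable filtering in ata_dev_xfermask() below is a real
 * in-file example of the same pattern.
 */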

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
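
/*
 * Worked example of the mask arithmetic above, assuming a 3.0 Gbps
 * (gen2) link that keeps failing:
 *
 *	sata_spd_limit = 0x3		(gen1 | gen2 allowed)
 *	highbit = fls(0x3) - 1 = 1;	mask = 0x3 & ~(1 << 1) = 0x1
 *	current spd from SStatus = 2;	spd-- -> 1
 *	mask &= (1 << 1) - 1 = 0x1	-> still 0x1, non-zero
 *	sata_spd_limit becomes 0x1	(gen1 only, 1.5 Gbps)
 *
 * A second call would then fail with -EINVAL since mask <= 1, i.e.
 * there is nothing slower left to fall back to.
 */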

static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}

/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
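
/*
 * Example, continuing the sata_down_spd_limit() case above: with
 * sata_spd_limit == 0x1, __sata_set_spd_needed() computes
 * limit = fls(0x1) = 1 and rewrites SControl's SPD field (bits 7:4)
 * to 1, so the link renegotiates at 1.5 Gbps at most.  The new limit
 * only takes effect once the phy is reset, which is why
 * sata_port_hardreset() below calls sata_set_spd() before issuing
 * the COMRESET.
 */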

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
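
/*
 * ENOUGH() is just round-up division: the smallest number of clock
 * periods of length 'unit' that covers 'v'.  EZ() additionally maps
 * an unused field (0) to 0 clocks instead of 1.  For example,
 * quantizing the PIO 4 cycle time of 120 ns for a controller whose
 * timing unit T corresponds to 30 ns (both scaled by 1000 above):
 *
 *	ENOUGH(120000, 30000) = (120000 - 1) / 30000 + 1 = 4 clocks
 *
 * whereas a 125 ns requirement would round up to 5 clocks rather
 * than silently truncating to 4 and violating the spec timing.
 */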

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
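
/*
 * Worked example of the final lengthening step: suppose quantization
 * left active = 2, recover = 1, cycle = 5 (all in clocks).  Since
 * 2 + 1 < 5, the slack of 2 clocks is split:
 *
 *	active  += (5 - 3) / 2 = 1	-> active  = 3
 *	recover  = 5 - 3		-> recover = 2
 *
 * so active + recover now equals the required cycle time, with any
 * odd remainder of the slack going to the recovery phase.
 */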

/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
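
/*
 * Example of the ATA_DNXFER_DMA case, for a device currently capable
 * of UDMA 0-5 (udma_mask == 0x3f):
 *
 *	highbit = fls(0x3f) - 1 = 5
 *	udma_mask &= ~(1 << 5)	-> 0x1f	(UDMA 0-4)
 *
 * Repeated calls keep knocking off the fastest remaining mode, fall
 * back to trimming mwdma_mask once udma_mask is exhausted, and
 * return -ENOENT when nothing slower is left, letting callers detect
 * that they have run out of fallback options.
 */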

static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);
 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2 ms rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
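
/*
 * The SRST handshake above as a timeline (the delays are this
 * driver's conservative choices, not spec minimums except where
 * noted):
 *
 *	t = 0       Device Control written with SRST clear
 *	t + 20us    SRST set   - both devices on the channel enter reset
 *	t + 40us    SRST clear - devices begin their diagnostics
 *	t + ~150ms  first status read; ATA/ATAPI-6 itself only mandates
 *	            waiting >= 2 ms before checking status
 */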

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
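
/*
 * For example, with params = { 5, 100, 2000 } (interval 5 ms,
 * duration 100 ms, timeout 2 s), the loop above reads DET from
 * SStatus every 5 ms and succeeds once the value has held steady
 * (and is not 1) for 100 ms; if no such stable window has even begun
 * within 2 s, the wait fails with -EBUSY.  Coarser polling and
 * longer windows can be used to ride out slower-settling phys.
 */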

/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}

static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}

/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}

/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}

/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}

/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
	u64 new_n_sectors;

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)dev->n_sectors,
			       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}

/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}

struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};

unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strcmp(ad->model_num, model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strcmp(ad->model_rev, model_rev))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
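
/*
 * Example walk of the table for the drive this change targets: a
 * "TORiSAN DVD-ROM DRD-N216" matches the MAX_SEC_128 entry, whose
 * NULL model_rev means any firmware revision qualifies, so the
 * function returns ATA_HORKAGE_MAX_SEC_128 and ata_dev_configure()
 * earlier in this file clamps dev->max_sectors to
 * ATA_MAX_SECTORS_128 accordingly.
 */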

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
}

/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) &&
	    (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
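
/*
 * The cable-rule mask above is worth decoding once: 0xF8 is bits 3-7,
 * so ~(0xF8 << ATA_SHIFT_UDMA) clears UDMA3 and above while leaving
 * UDMA0-2 set - i.e. a 40-wire cable caps the device at UDMA/33,
 * the fastest mode the ATA spec allows without the extra ground
 * lines of an 80-wire cable.
 */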

/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
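
/*
 * On the wire this is the standard SET FEATURES handshake: the
 * subcommand (SETFEATURES_XFER) goes in the Features register and
 * the desired mode in the Sector Count register.  Per the ATA spec
 * the mode byte encodes the transfer class in its high bits, e.g.
 * 0x08 | n selects PIO flow-control mode n, 0x20 | n multiword DMA
 * mode n and 0x40 | n ultra DMA mode n, which is what the XFER_*
 * values stored in dev->xfer_mode expand to.
 */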

/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
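
/*
 * Example encoding: restoring a legacy 16 head / 63 sectors-per-track
 * translation gives tf.nsect = 63 and tf.device |= (16 - 1) & 0x0f,
 * i.e. 0x0f in the low nibble of the Device register - INIT DEVICE
 * PARAMETERS takes "max head", one less than the head count, as the
 * comment above notes.
 */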

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3612
3613/**
3614 * ata_fill_sg - Fill PCI IDE PRD table
3615 * @qc: Metadata associated with taskfile to be transferred
3616 *
780a87f7
JG
3617 * Fill PCI IDE PRD (scatter-gather) table with segments
3618 * associated with the current disk command.
3619 *
1da177e4 3620 * LOCKING:
cca3974e 3621 * spin_lock_irqsave(host lock)
1da177e4
LT
3622 *
3623 */
3624static void ata_fill_sg(struct ata_queued_cmd *qc)
3625{
1da177e4 3626 struct ata_port *ap = qc->ap;
cedc9a47
JG
3627 struct scatterlist *sg;
3628 unsigned int idx;
1da177e4 3629
a4631474 3630 WARN_ON(qc->__sg == NULL);
f131883e 3631 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3632
3633 idx = 0;
cedc9a47 3634 ata_for_each_sg(sg, qc) {
1da177e4
LT
3635 u32 addr, offset;
3636 u32 sg_len, len;
3637
3638 /* determine if physical DMA addr spans 64K boundary.
3639 * Note h/w doesn't support 64-bit, so we unconditionally
3640 * truncate dma_addr_t to u32.
3641 */
3642 addr = (u32) sg_dma_address(sg);
3643 sg_len = sg_dma_len(sg);
3644
3645 while (sg_len) {
3646 offset = addr & 0xffff;
3647 len = sg_len;
3648 if ((offset + sg_len) > 0x10000)
3649 len = 0x10000 - offset;
3650
3651 ap->prd[idx].addr = cpu_to_le32(addr);
3652 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3653 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3654
3655 idx++;
3656 sg_len -= len;
3657 addr += len;
3658 }
3659 }
3660
3661 if (idx)
3662 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3663}
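/*
 * A small standalone sketch (not part of libata) of the 64KiB boundary
 * split performed by ata_fill_sg() above: a segment of 0x3000 bytes at
 * 0x1f000 crosses a boundary and becomes two PRD entries,
 * (0x1f000, 0x1000) and (0x20000, 0x2000).  Hypothetical names.
 */
#include <stdio.h>
#include <stdint.h>

static void sketch_prd_split(uint32_t addr, uint32_t sg_len)
{
	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;	/* cut at the boundary */

		printf("PRD entry: addr=0x%05x len=0x%04x\n",
		       (unsigned int)addr, (unsigned int)len);
		sg_len -= len;
		addr += len;
	}
}

int main(void)
{
	sketch_prd_split(0x1f000, 0x3000);
	return 0;
}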
3664/**
3665 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3666 * @qc: Metadata associated with taskfile to check
3667 *
780a87f7
JG
3668 * Allow low-level driver to filter ATA PACKET commands, returning
3669 * a status indicating whether or not it is OK to use DMA for the
3670 * supplied PACKET command.
3671 *
1da177e4 3672 * LOCKING:
cca3974e 3673 * spin_lock_irqsave(host lock)
0cba632b 3674 *
1da177e4
LT
3675 * RETURNS:
3676 * 0 when ATAPI DMA can be used, nonzero otherwise
3677 */
3678int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3679{
3680 struct ata_port *ap = qc->ap;
3681 int rc = 0; /* Assume ATAPI DMA is OK by default */
3682
3683 if (ap->ops->check_atapi_dma)
3684 rc = ap->ops->check_atapi_dma(qc);
3685
3686 return rc;
3687}
3688/**
3689 * ata_qc_prep - Prepare taskfile for submission
3690 * @qc: Metadata associated with taskfile to be prepared
3691 *
780a87f7
JG
3692 * Prepare ATA taskfile for submission.
3693 *
1da177e4 3694 * LOCKING:
cca3974e 3695 * spin_lock_irqsave(host lock)
1da177e4
LT
3696 */
3697void ata_qc_prep(struct ata_queued_cmd *qc)
3698{
3699 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3700 return;
3701
3702 ata_fill_sg(qc);
3703}
3704
e46834cd
BK
3705void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3706
0cba632b
JG
3707/**
3708 * ata_sg_init_one - Associate command with memory buffer
3709 * @qc: Command to be associated
3710 * @buf: Memory buffer
3711 * @buflen: Length of memory buffer, in bytes.
3712 *
3713 * Initialize the data-related elements of queued_cmd @qc
3714 * to point to a single memory buffer, @buf of byte length @buflen.
3715 *
3716 * LOCKING:
cca3974e 3717 * spin_lock_irqsave(host lock)
0cba632b
JG
3718 */
3719
1da177e4
LT
3720void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3721{
1da177e4
LT
3722 qc->flags |= ATA_QCFLAG_SINGLE;
3723
cedc9a47 3724 qc->__sg = &qc->sgent;
1da177e4 3725 qc->n_elem = 1;
cedc9a47 3726 qc->orig_n_elem = 1;
1da177e4 3727 qc->buf_virt = buf;
233277ca 3728 qc->nbytes = buflen;
1da177e4 3729
61c0596c 3730 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3731}
3732
0cba632b
JG
3733/**
3734 * ata_sg_init - Associate command with scatter-gather table.
3735 * @qc: Command to be associated
3736 * @sg: Scatter-gather table.
3737 * @n_elem: Number of elements in s/g table.
3738 *
3739 * Initialize the data-related elements of queued_cmd @qc
3740 * to point to a scatter-gather table @sg, containing @n_elem
3741 * elements.
3742 *
3743 * LOCKING:
cca3974e 3744 * spin_lock_irqsave(host lock)
0cba632b
JG
3745 */
3746
1da177e4
LT
3747void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3748 unsigned int n_elem)
3749{
3750 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3751 qc->__sg = sg;
1da177e4 3752 qc->n_elem = n_elem;
cedc9a47 3753 qc->orig_n_elem = n_elem;
1da177e4
LT
3754}
3755
3756/**
0cba632b
JG
3757 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3758 * @qc: Command with memory buffer to be mapped.
3759 *
3760 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3761 *
3762 * LOCKING:
cca3974e 3763 * spin_lock_irqsave(host lock)
1da177e4
LT
3764 *
3765 * RETURNS:
0cba632b 3766 * Zero on success, negative on error.
1da177e4
LT
3767 */
3768
3769static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3770{
3771 struct ata_port *ap = qc->ap;
3772 int dir = qc->dma_dir;
cedc9a47 3773 struct scatterlist *sg = qc->__sg;
1da177e4 3774 dma_addr_t dma_address;
2e242fa9 3775 int trim_sg = 0;
1da177e4 3776
cedc9a47
JG
3777 /* we must lengthen transfers to end on a 32-bit boundary */
3778 qc->pad_len = sg->length & 3;
3779 if (qc->pad_len) {
3780 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3781 struct scatterlist *psg = &qc->pad_sgent;
3782
a4631474 3783 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3784
3785 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3786
3787 if (qc->tf.flags & ATA_TFLAG_WRITE)
3788 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3789 qc->pad_len);
3790
3791 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3792 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3793 /* trim sg */
3794 sg->length -= qc->pad_len;
2e242fa9
TH
3795 if (sg->length == 0)
3796 trim_sg = 1;
cedc9a47
JG
3797
3798 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3799 sg->length, qc->pad_len);
3800 }
3801
2e242fa9
TH
3802 if (trim_sg) {
3803 qc->n_elem--;
e1410f2d
JG
3804 goto skip_map;
3805 }
3806
2f1f610b 3807 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3808 sg->length, dir);
537a95d9
TH
3809 if (dma_mapping_error(dma_address)) {
3810 /* restore sg */
3811 sg->length += qc->pad_len;
1da177e4 3812 return -1;
537a95d9 3813 }
1da177e4
LT
3814
3815 sg_dma_address(sg) = dma_address;
32529e01 3816 sg_dma_len(sg) = sg->length;
1da177e4 3817
2e242fa9 3818skip_map:
1da177e4
LT
3819 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3820 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3821
3822 return 0;
3823}
3824
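/*
 * A brief standalone sketch (not part of libata) of the 32-bit padding
 * rule used above: pad_len = length & 3; the sg entry is trimmed by
 * pad_len and the tail bytes travel through the zero-filled 4-byte pad
 * buffer.  A 2-byte ATAPI transfer trims the sg to zero length, so the
 * DMA mapping step is skipped entirely (trim_sg).  Hypothetical names.
 */
#include <stdio.h>

static void sketch_pad_rule(unsigned int length)
{
	unsigned int pad_len = length & 3;
	unsigned int dma_len = length - pad_len;

	printf("len=%u: map %u byte(s) directly, %u via pad buffer%s\n",
	       length, dma_len, pad_len,
	       (pad_len && !dma_len) ? " (sg trimmed away, skip map)" : "");
}

int main(void)
{
	sketch_pad_rule(1022);	/* 1020 mapped + 2 padded to 4 */
	sketch_pad_rule(2);	/* everything through the pad buffer */
	return 0;
}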
3825/**
0cba632b
JG
3826 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3827 * @qc: Command with scatter-gather table to be mapped.
3828 *
3829 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3830 *
3831 * LOCKING:
cca3974e 3832 * spin_lock_irqsave(host lock)
1da177e4
LT
3833 *
3834 * RETURNS:
0cba632b 3835 * Zero on success, negative on error.
1da177e4
LT
3836 *
3837 */
3838
3839static int ata_sg_setup(struct ata_queued_cmd *qc)
3840{
3841 struct ata_port *ap = qc->ap;
cedc9a47
JG
3842 struct scatterlist *sg = qc->__sg;
3843 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3844 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 3845
44877b4e 3846 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 3847 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3848
cedc9a47
JG
3849 /* we must lengthen transfers to end on a 32-bit boundary */
3850 qc->pad_len = lsg->length & 3;
3851 if (qc->pad_len) {
3852 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3853 struct scatterlist *psg = &qc->pad_sgent;
3854 unsigned int offset;
3855
a4631474 3856 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3857
3858 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3859
3860 /*
3861 * psg->page/offset are used to copy to-be-written
3862 * data in this function or read data in ata_sg_clean.
3863 */
3864 offset = lsg->offset + lsg->length - qc->pad_len;
3865 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3866 psg->offset = offset_in_page(offset);
3867
3868 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3869 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3870 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3871 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3872 }
3873
3874 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3875 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3876 /* trim last sg */
3877 lsg->length -= qc->pad_len;
e1410f2d
JG
3878 if (lsg->length == 0)
3879 trim_sg = 1;
cedc9a47
JG
3880
3881 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3882 qc->n_elem - 1, lsg->length, qc->pad_len);
3883 }
3884
e1410f2d
JG
3885 pre_n_elem = qc->n_elem;
3886 if (trim_sg && pre_n_elem)
3887 pre_n_elem--;
3888
3889 if (!pre_n_elem) {
3890 n_elem = 0;
3891 goto skip_map;
3892 }
3893
1da177e4 3894 dir = qc->dma_dir;
2f1f610b 3895 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3896 if (n_elem < 1) {
3897 /* restore last sg */
3898 lsg->length += qc->pad_len;
1da177e4 3899 return -1;
537a95d9 3900 }
1da177e4
LT
3901
3902 DPRINTK("%d sg elements mapped\n", n_elem);
3903
e1410f2d 3904skip_map:
1da177e4
LT
3905 qc->n_elem = n_elem;
3906
3907 return 0;
3908}
3909
0baab86b 3910/**
c893a3ae 3911 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3912 * @buf: Buffer to swap
3913 * @buf_words: Number of 16-bit words in buffer.
3914 *
3915 * Swap halves of 16-bit words if needed to convert from
3916 * little-endian byte order to native cpu byte order, or
3917 * vice-versa.
3918 *
3919 * LOCKING:
6f0ef4fa 3920 * Inherited from caller.
0baab86b 3921 */
1da177e4
LT
3922void swap_buf_le16(u16 *buf, unsigned int buf_words)
3923{
3924#ifdef __BIG_ENDIAN
3925 unsigned int i;
3926
3927 for (i = 0; i < buf_words; i++)
3928 buf[i] = le16_to_cpu(buf[i]);
3929#endif /* __BIG_ENDIAN */
3930}
3931
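/*
 * A standalone sketch (not part of libata) of what swap_buf_le16() is
 * for: IDENTIFY DEVICE data arrives as little-endian 16-bit words, so a
 * big-endian CPU must swap each word before using the data.  This
 * version probes host endianness at run time; hypothetical names.
 */
#include <stdint.h>

static void sketch_swap_le16(uint16_t *buf, unsigned int words)
{
	const union { uint16_t v; uint8_t b[2]; } probe = { 1 };
	unsigned int i;

	if (probe.b[0])		/* little-endian host: already in order */
		return;

	for (i = 0; i < words; i++)
		buf[i] = (uint16_t)((buf[i] << 8) | (buf[i] >> 8));
}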
6ae4cfb5 3932/**
0d5ff566 3933 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3934 * @adev: device to target
6ae4cfb5
AL
3935 * @buf: data buffer
3936 * @buflen: buffer length
344babaa 3937 * @write_data: read/write
6ae4cfb5
AL
3938 *
3939 * Transfer data from/to the device data register by PIO.
3940 *
3941 * LOCKING:
3942 * Inherited from caller.
6ae4cfb5 3943 */
0d5ff566
TH
3944void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3945 unsigned int buflen, int write_data)
1da177e4 3946{
a6b2c5d4 3947 struct ata_port *ap = adev->ap;
6ae4cfb5 3948 unsigned int words = buflen >> 1;
1da177e4 3949
6ae4cfb5 3950 /* Transfer multiple of 2 bytes */
1da177e4 3951 if (write_data)
0d5ff566 3952 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3953 else
0d5ff566 3954 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3955
3956 /* Transfer trailing 1 byte, if any. */
3957 if (unlikely(buflen & 0x01)) {
3958 u16 align_buf[1] = { 0 };
3959 unsigned char *trailing_buf = buf + buflen - 1;
3960
3961 if (write_data) {
3962 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3963 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3964 } else {
0d5ff566 3965 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3966 memcpy(trailing_buf, align_buf, 1);
3967 }
3968 }
1da177e4
LT
3969}
3970
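/*
 * A standalone sketch (not part of libata) of the trailing-byte rule in
 * ata_data_xfer() above: the data register is 16 bits wide, so an odd
 * buffer length needs one final word with the stray byte bounced
 * through an aligned 2-byte buffer.  Hypothetical names.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void sketch_pio_write(const uint8_t *buf, unsigned int buflen)
{
	unsigned int words = buflen >> 1;

	printf("write %u full 16-bit word(s)\n", words);

	if (buflen & 1) {
		uint16_t align_buf = 0;

		memcpy(&align_buf, buf + buflen - 1, 1);	/* stray byte */
		printf("write final word 0x%04x (other byte zero)\n",
		       (unsigned int)align_buf);
	}
}

int main(void)
{
	static const uint8_t data[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };

	sketch_pio_write(data, sizeof(data));
	return 0;
}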
75e99585 3971/**
0d5ff566 3972 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
3973 * @adev: device to target
3974 * @buf: data buffer
3975 * @buflen: buffer length
3976 * @write_data: read/write
3977 *
88574551 3978 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
3979 * transfer with interrupts disabled.
3980 *
3981 * LOCKING:
3982 * Inherited from caller.
3983 */
0d5ff566
TH
3984void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3985 unsigned int buflen, int write_data)
75e99585
AC
3986{
3987 unsigned long flags;
3988 local_irq_save(flags);
0d5ff566 3989 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
3990 local_irq_restore(flags);
3991}
3992
3993
6ae4cfb5
AL
3994/**
3995 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3996 * @qc: Command on going
3997 *
3998 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3999 *
4000 * LOCKING:
4001 * Inherited from caller.
4002 */
4003
1da177e4
LT
4004static void ata_pio_sector(struct ata_queued_cmd *qc)
4005{
4006 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4007 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4008 struct ata_port *ap = qc->ap;
4009 struct page *page;
4010 unsigned int offset;
4011 unsigned char *buf;
4012
726f0785 4013 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
14be71f4 4014 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4015
4016 page = sg[qc->cursg].page;
726f0785 4017 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4018
4019 /* get the current page and offset */
4020 page = nth_page(page, (offset >> PAGE_SHIFT));
4021 offset %= PAGE_SIZE;
4022
1da177e4
LT
4023 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4024
91b8b313
AL
4025 if (PageHighMem(page)) {
4026 unsigned long flags;
4027
a6b2c5d4 4028 /* FIXME: use a bounce buffer */
91b8b313
AL
4029 local_irq_save(flags);
4030 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4031
91b8b313 4032 /* do the actual data transfer */
a6b2c5d4 4033 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 4034
91b8b313
AL
4035 kunmap_atomic(buf, KM_IRQ0);
4036 local_irq_restore(flags);
4037 } else {
4038 buf = page_address(page);
a6b2c5d4 4039 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 4040 }
1da177e4 4041
726f0785
TH
4042 qc->curbytes += ATA_SECT_SIZE;
4043 qc->cursg_ofs += ATA_SECT_SIZE;
1da177e4 4044
726f0785 4045 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4046 qc->cursg++;
4047 qc->cursg_ofs = 0;
4048 }
1da177e4 4049}
1da177e4 4050
07f6f7d0
AL
4051/**
4052 * ata_pio_sectors - Transfer one or many 512-byte sectors.
4053 * @qc: Command on going
4054 *
c81e29b4 4055 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
4056 * ATA device for the DRQ request.
4057 *
4058 * LOCKING:
4059 * Inherited from caller.
4060 */
1da177e4 4061
07f6f7d0
AL
4062static void ata_pio_sectors(struct ata_queued_cmd *qc)
4063{
4064 if (is_multi_taskfile(&qc->tf)) {
4065 /* READ/WRITE MULTIPLE */
4066 unsigned int nsect;
4067
587005de 4068 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4069
726f0785
TH
4070 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4071 qc->dev->multi_count);
07f6f7d0
AL
4072 while (nsect--)
4073 ata_pio_sector(qc);
4074 } else
4075 ata_pio_sector(qc);
4076}
4077
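/*
 * A minimal sketch (not part of libata) of the per-DRQ sector count
 * used by ata_pio_sectors() for READ/WRITE MULTIPLE: never more than
 * the device's multi_count, never more than what is left of the
 * command.  Hypothetical names.
 */
#include <stdint.h>

#define SKETCH_SECT_SIZE 512u

static unsigned int sketch_drq_sectors(uint32_t nbytes, uint32_t curbytes,
				       unsigned int multi_count)
{
	unsigned int left = (nbytes - curbytes) / SKETCH_SECT_SIZE;

	return left < multi_count ? left : multi_count;
}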
c71c1857
AL
4078/**
4079 * atapi_send_cdb - Write CDB bytes to hardware
4080 * @ap: Port to which ATAPI device is attached.
4081 * @qc: Taskfile currently active
4082 *
4083 * When the device has indicated its readiness to accept
4084 * a CDB, this function is called. Send the CDB.
4085 *
4086 * LOCKING:
4087 * Inherited from caller.
4088 */
4089
4090static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4091{
4092 /* send SCSI cdb */
4093 DPRINTK("send cdb\n");
db024d53 4094 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4095
a6b2c5d4 4096 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4097 ata_altstatus(ap); /* flush */
4098
4099 switch (qc->tf.protocol) {
4100 case ATA_PROT_ATAPI:
4101 ap->hsm_task_state = HSM_ST;
4102 break;
4103 case ATA_PROT_ATAPI_NODATA:
4104 ap->hsm_task_state = HSM_ST_LAST;
4105 break;
4106 case ATA_PROT_ATAPI_DMA:
4107 ap->hsm_task_state = HSM_ST_LAST;
4108 /* initiate bmdma */
4109 ap->ops->bmdma_start(qc);
4110 break;
4111 }
1da177e4
LT
4112}
4113
6ae4cfb5
AL
4114/**
4115 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4116 * @qc: Command on going
4117 * @bytes: number of bytes
4118 *
4119 * Transfer data from/to the ATAPI device.
4120 *
4121 * LOCKING:
4122 * Inherited from caller.
4123 *
4124 */
4125
1da177e4
LT
4126static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4127{
4128 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4129 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4130 struct ata_port *ap = qc->ap;
4131 struct page *page;
4132 unsigned char *buf;
4133 unsigned int offset, count;
4134
563a6e1f 4135 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4136 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4137
4138next_sg:
563a6e1f 4139 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4140 /*
563a6e1f
AL
4141 * The end of qc->sg is reached and the device expects
4142 * more data to transfer. To avoid overrunning qc->sg while still
4143 * honoring the length specified in the byte count register:
4144 * - for reads, discard the trailing data from the device
4145 * - for writes, pad the device with zero data
4146 */
4147 u16 pad_buf[1] = { 0 };
4148 unsigned int words = bytes >> 1;
4149 unsigned int i;
4150
4151 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4152 ata_dev_printk(qc->dev, KERN_WARNING,
4153 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4154
4155 for (i = 0; i < words; i++)
a6b2c5d4 4156 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4157
14be71f4 4158 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4159 return;
4160 }
4161
cedc9a47 4162 sg = &qc->__sg[qc->cursg];
1da177e4 4163
1da177e4
LT
4164 page = sg->page;
4165 offset = sg->offset + qc->cursg_ofs;
4166
4167 /* get the current page and offset */
4168 page = nth_page(page, (offset >> PAGE_SHIFT));
4169 offset %= PAGE_SIZE;
4170
6952df03 4171 /* don't overrun current sg */
32529e01 4172 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4173
4174 /* don't cross page boundaries */
4175 count = min(count, (unsigned int)PAGE_SIZE - offset);
4176
7282aa4b
AL
4177 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4178
91b8b313
AL
4179 if (PageHighMem(page)) {
4180 unsigned long flags;
4181
a6b2c5d4 4182 /* FIXME: use bounce buffer */
91b8b313
AL
4183 local_irq_save(flags);
4184 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4185
91b8b313 4186 /* do the actual data transfer */
a6b2c5d4 4187 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4188
91b8b313
AL
4189 kunmap_atomic(buf, KM_IRQ0);
4190 local_irq_restore(flags);
4191 } else {
4192 buf = page_address(page);
a6b2c5d4 4193 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4194 }
1da177e4
LT
4195
4196 bytes -= count;
4197 qc->curbytes += count;
4198 qc->cursg_ofs += count;
4199
32529e01 4200 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4201 qc->cursg++;
4202 qc->cursg_ofs = 0;
4203 }
4204
563a6e1f 4205 if (bytes)
1da177e4 4206 goto next_sg;
1da177e4
LT
4207}
4208
6ae4cfb5
AL
4209/**
4210 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4211 * @qc: Command on going
4212 *
4213 * Transfer data from/to the ATAPI device.
4214 *
4215 * LOCKING:
4216 * Inherited from caller.
6ae4cfb5
AL
4217 */
4218
1da177e4
LT
4219static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4220{
4221 struct ata_port *ap = qc->ap;
4222 struct ata_device *dev = qc->dev;
4223 unsigned int ireason, bc_lo, bc_hi, bytes;
4224 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4225
eec4c3f3
AL
4226 /* Abuse qc->result_tf for temp storage of intermediate TF
4227 * here to save some kernel stack usage.
4228 * For normal completion, qc->result_tf is not relevant. For
4229 * error, qc->result_tf is later overwritten by ata_qc_complete().
4230 * So, the correctness of qc->result_tf is not affected.
4231 */
4232 ap->ops->tf_read(ap, &qc->result_tf);
4233 ireason = qc->result_tf.nsect;
4234 bc_lo = qc->result_tf.lbam;
4235 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4236 bytes = (bc_hi << 8) | bc_lo;
4237
4238 /* shall be cleared to zero, indicating xfer of data */
4239 if (ireason & (1 << 0))
4240 goto err_out;
4241
4242 /* make sure transfer direction matches expected */
4243 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4244 if (do_write != i_write)
4245 goto err_out;
4246
44877b4e 4247 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4248
1da177e4
LT
4249 __atapi_pio_bytes(qc, bytes);
4250
4251 return;
4252
4253err_out:
f15a1daf 4254 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4255 qc->err_mask |= AC_ERR_HSM;
14be71f4 4256 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4257}
4258
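/*
 * A standalone sketch (not part of libata) of the ATAPI DRQ handshake
 * decode done in atapi_pio_bytes() above: bit 0 of the interrupt reason
 * (sector count register) is CoD (0 = data), bit 1 is IO (1 = to host),
 * and the LBA mid/high registers hold the byte count for this DRQ.
 * Hypothetical names.
 */
#include <stdint.h>

struct sketch_drq {
	unsigned int bytes;	/* byte count for this DRQ block */
	int is_data;		/* CoD cleared: data, not command */
	int to_host;		/* IO set: device-to-host (read) */
};

static struct sketch_drq sketch_decode_drq(uint8_t nsect,
					   uint8_t lbam, uint8_t lbah)
{
	struct sketch_drq d;

	d.bytes = ((unsigned int)lbah << 8) | lbam;
	d.is_data = !(nsect & (1 << 0));
	d.to_host = !!(nsect & (1 << 1));
	return d;
}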
4259/**
c234fb00
AL
4260 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4261 * @ap: the target ata_port
4262 * @qc: qc on going
1da177e4 4263 *
c234fb00
AL
4264 * RETURNS:
4265 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4266 */
c234fb00
AL
4267
4268static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4269{
c234fb00
AL
4270 if (qc->tf.flags & ATA_TFLAG_POLLING)
4271 return 1;
1da177e4 4272
c234fb00
AL
4273 if (ap->hsm_task_state == HSM_ST_FIRST) {
4274 if (qc->tf.protocol == ATA_PROT_PIO &&
4275 (qc->tf.flags & ATA_TFLAG_WRITE))
4276 return 1;
1da177e4 4277
c234fb00
AL
4278 if (is_atapi_taskfile(&qc->tf) &&
4279 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4280 return 1;
fe79e683
AL
4281 }
4282
c234fb00
AL
4283 return 0;
4284}
1da177e4 4285
c17ea20d
TH
4286/**
4287 * ata_hsm_qc_complete - finish a qc running on standard HSM
4288 * @qc: Command to complete
4289 * @in_wq: 1 if called from workqueue, 0 otherwise
4290 *
4291 * Finish @qc which is running on standard HSM.
4292 *
4293 * LOCKING:
cca3974e 4294 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4295 * Otherwise, none on entry and grabs host lock.
4296 */
4297static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4298{
4299 struct ata_port *ap = qc->ap;
4300 unsigned long flags;
4301
4302 if (ap->ops->error_handler) {
4303 if (in_wq) {
ba6a1308 4304 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4305
cca3974e
JG
4306 /* EH might have kicked in while host lock is
4307 * released.
c17ea20d
TH
4308 */
4309 qc = ata_qc_from_tag(ap, qc->tag);
4310 if (qc) {
4311 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4312 ap->ops->irq_on(ap);
c17ea20d
TH
4313 ata_qc_complete(qc);
4314 } else
4315 ata_port_freeze(ap);
4316 }
4317
ba6a1308 4318 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4319 } else {
4320 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4321 ata_qc_complete(qc);
4322 else
4323 ata_port_freeze(ap);
4324 }
4325 } else {
4326 if (in_wq) {
ba6a1308 4327 spin_lock_irqsave(ap->lock, flags);
83625006 4328 ap->ops->irq_on(ap);
c17ea20d 4329 ata_qc_complete(qc);
ba6a1308 4330 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4331 } else
4332 ata_qc_complete(qc);
4333 }
1da177e4 4334
c81e29b4 4335 ata_altstatus(ap); /* flush */
c17ea20d
TH
4336}
4337
bb5cb290
AL
4338/**
4339 * ata_hsm_move - move the HSM to the next state.
4340 * @ap: the target ata_port
4341 * @qc: qc on going
4342 * @status: current device status
4343 * @in_wq: 1 if called from workqueue, 0 otherwise
4344 *
4345 * RETURNS:
4346 * 1 when poll next status needed, 0 otherwise.
4347 */
9a1004d0
TH
4348int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4349 u8 status, int in_wq)
e2cec771 4350{
bb5cb290
AL
4351 unsigned long flags = 0;
4352 int poll_next;
4353
6912ccd5
AL
4354 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4355
bb5cb290
AL
4356 /* Make sure ata_qc_issue_prot() does not throw things
4357 * like DMA polling into the workqueue. Notice that
4358 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4359 */
c234fb00 4360 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4361
e2cec771 4362fsm_start:
999bb6f4 4363 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4364 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4365
e2cec771
AL
4366 switch (ap->hsm_task_state) {
4367 case HSM_ST_FIRST:
bb5cb290
AL
4368 /* Send first data block or PACKET CDB */
4369
4370 /* If polling, we will stay in the work queue after
4371 * sending the data. Otherwise, interrupt handler
4372 * takes over after sending the data.
4373 */
4374 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4375
e2cec771 4376 /* check device status */
3655d1d3
AL
4377 if (unlikely((status & ATA_DRQ) == 0)) {
4378 /* handle BSY=0, DRQ=0 as error */
4379 if (likely(status & (ATA_ERR | ATA_DF)))
4380 /* device stops HSM for abort/error */
4381 qc->err_mask |= AC_ERR_DEV;
4382 else
4383 /* HSM violation. Let EH handle this */
4384 qc->err_mask |= AC_ERR_HSM;
4385
14be71f4 4386 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4387 goto fsm_start;
1da177e4
LT
4388 }
4389
71601958
AL
4390 /* Device should not ask for data transfer (DRQ=1)
4391 * when it finds something wrong.
eee6c32f
AL
4392 * We ignore DRQ here and stop the HSM by
4393 * changing hsm_task_state to HSM_ST_ERR and
4394 * let the EH abort the command or reset the device.
71601958
AL
4395 */
4396 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4397 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4398 "error, dev_stat 0x%X\n", status);
3655d1d3 4399 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4400 ap->hsm_task_state = HSM_ST_ERR;
4401 goto fsm_start;
71601958 4402 }
1da177e4 4403
bb5cb290
AL
4404 /* Send the CDB (atapi) or the first data block (ata pio out).
4405 * During the state transition, interrupt handler shouldn't
4406 * be invoked before the data transfer is complete and
4407 * hsm_task_state is changed. Hence, the following locking.
4408 */
4409 if (in_wq)
ba6a1308 4410 spin_lock_irqsave(ap->lock, flags);
1da177e4 4411
bb5cb290
AL
4412 if (qc->tf.protocol == ATA_PROT_PIO) {
4413 /* PIO data out protocol.
4414 * send first data block.
4415 */
0565c26d 4416
bb5cb290
AL
4417 /* ata_pio_sectors() might change the state
4418 * to HSM_ST_LAST. so, the state is changed here
4419 * before ata_pio_sectors().
4420 */
4421 ap->hsm_task_state = HSM_ST;
4422 ata_pio_sectors(qc);
4423 ata_altstatus(ap); /* flush */
4424 } else
4425 /* send CDB */
4426 atapi_send_cdb(ap, qc);
4427
4428 if (in_wq)
ba6a1308 4429 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4430
4431 /* if polling, ata_pio_task() handles the rest.
4432 * otherwise, interrupt handler takes over from here.
4433 */
e2cec771 4434 break;
1c848984 4435
e2cec771
AL
4436 case HSM_ST:
4437 /* complete command or read/write the data register */
4438 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4439 /* ATAPI PIO protocol */
4440 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4441 /* No more data to transfer or device error.
4442 * Device error will be tagged in HSM_ST_LAST.
4443 */
e2cec771
AL
4444 ap->hsm_task_state = HSM_ST_LAST;
4445 goto fsm_start;
4446 }
1da177e4 4447
71601958
AL
4448 /* Device should not ask for data transfer (DRQ=1)
4449 * when it finds something wrong.
eee6c32f
AL
4450 * We ignore DRQ here and stop the HSM by
4451 * changing hsm_task_state to HSM_ST_ERR and
4452 * let the EH abort the command or reset the device.
71601958
AL
4453 */
4454 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4455 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4456 "device error, dev_stat 0x%X\n",
4457 status);
3655d1d3 4458 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4459 ap->hsm_task_state = HSM_ST_ERR;
4460 goto fsm_start;
71601958 4461 }
1da177e4 4462
e2cec771 4463 atapi_pio_bytes(qc);
7fb6ec28 4464
e2cec771
AL
4465 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4466 /* bad ireason reported by device */
4467 goto fsm_start;
1da177e4 4468
e2cec771
AL
4469 } else {
4470 /* ATA PIO protocol */
4471 if (unlikely((status & ATA_DRQ) == 0)) {
4472 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4473 if (likely(status & (ATA_ERR | ATA_DF)))
4474 /* device stops HSM for abort/error */
4475 qc->err_mask |= AC_ERR_DEV;
4476 else
55a8e2c8
TH
4477 /* HSM violation. Let EH handle this.
4478 * Phantom devices also trigger this
4479 * condition. Mark hint.
4480 */
4481 qc->err_mask |= AC_ERR_HSM |
4482 AC_ERR_NODEV_HINT;
3655d1d3 4483
e2cec771
AL
4484 ap->hsm_task_state = HSM_ST_ERR;
4485 goto fsm_start;
4486 }
1da177e4 4487
eee6c32f
AL
4488 /* For PIO reads, some devices may ask for
4489 * data transfer (DRQ=1) along with ERR=1.
4490 * We respect DRQ here and transfer one
4491 * block of junk data before changing the
4492 * hsm_task_state to HSM_ST_ERR.
4493 *
4494 * For PIO writes, ERR=1 DRQ=1 doesn't make
4495 * sense since the data block has been
4496 * transferred to the device.
71601958
AL
4497 */
4498 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4499 			/* data might be corrupted */
4500 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4501
4502 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4503 ata_pio_sectors(qc);
4504 ata_altstatus(ap);
4505 status = ata_wait_idle(ap);
4506 }
4507
3655d1d3
AL
4508 if (status & (ATA_BUSY | ATA_DRQ))
4509 qc->err_mask |= AC_ERR_HSM;
4510
eee6c32f
AL
4511 /* ata_pio_sectors() might change the
4512 * state to HSM_ST_LAST. so, the state
4513 * is changed after ata_pio_sectors().
4514 */
4515 ap->hsm_task_state = HSM_ST_ERR;
4516 goto fsm_start;
71601958
AL
4517 }
4518
e2cec771
AL
4519 ata_pio_sectors(qc);
4520
4521 if (ap->hsm_task_state == HSM_ST_LAST &&
4522 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4523 /* all data read */
4524 ata_altstatus(ap);
52a32205 4525 status = ata_wait_idle(ap);
e2cec771
AL
4526 goto fsm_start;
4527 }
4528 }
4529
4530 ata_altstatus(ap); /* flush */
bb5cb290 4531 poll_next = 1;
1da177e4
LT
4532 break;
4533
14be71f4 4534 case HSM_ST_LAST:
6912ccd5
AL
4535 if (unlikely(!ata_ok(status))) {
4536 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4537 ap->hsm_task_state = HSM_ST_ERR;
4538 goto fsm_start;
4539 }
4540
4541 /* no more data to transfer */
4332a771 4542 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4543 ap->print_id, qc->dev->devno, status);
e2cec771 4544
6912ccd5
AL
4545 WARN_ON(qc->err_mask);
4546
e2cec771 4547 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4548
e2cec771 4549 /* complete taskfile transaction */
c17ea20d 4550 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4551
4552 poll_next = 0;
1da177e4
LT
4553 break;
4554
14be71f4 4555 case HSM_ST_ERR:
e2cec771
AL
4556 /* make sure qc->err_mask is available to
4557 * know what's wrong and recover
4558 */
4559 WARN_ON(qc->err_mask == 0);
4560
4561 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4562
999bb6f4 4563 /* complete taskfile transaction */
c17ea20d 4564 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4565
4566 poll_next = 0;
e2cec771
AL
4567 break;
4568 default:
bb5cb290 4569 poll_next = 0;
6912ccd5 4570 BUG();
1da177e4
LT
4571 }
4572
bb5cb290 4573 return poll_next;
1da177e4
LT
4574}
4575
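/*
 * A much-simplified standalone sketch (not part of libata) of the state
 * flow ata_hsm_move() implements for a polled PIO data-in command:
 * issue leaves the port in HSM_ST, each poll moves one DRQ block, the
 * final block switches to HSM_ST_LAST, and completion returns the
 * machine to HSM_ST_IDLE.  Error states and ATAPI are omitted;
 * hypothetical names.
 */
#include <stdio.h>

enum sketch_hsm { SK_HSM_ST, SK_HSM_ST_LAST, SK_HSM_ST_IDLE };

static void sketch_polled_pio_read(unsigned int nsects)
{
	enum sketch_hsm state = SK_HSM_ST;
	unsigned int done = 0;

	while (state != SK_HSM_ST_IDLE) {
		switch (state) {
		case SK_HSM_ST:		/* transfer one DRQ block */
			done++;
			printf("HSM_ST: sector %u/%u\n", done, nsects);
			if (done == nsects)
				state = SK_HSM_ST_LAST;
			break;
		case SK_HSM_ST_LAST:	/* final status check, complete */
			printf("HSM_ST_LAST: command complete\n");
			state = SK_HSM_ST_IDLE;
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	sketch_polled_pio_read(4);
	return 0;
}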
65f27f38 4576static void ata_pio_task(struct work_struct *work)
8061f5f0 4577{
65f27f38
DH
4578 struct ata_port *ap =
4579 container_of(work, struct ata_port, port_task.work);
4580 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4581 u8 status;
a1af3734 4582 int poll_next;
8061f5f0 4583
7fb6ec28 4584fsm_start:
a1af3734 4585 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4586
a1af3734
AL
4587 /*
4588 * This is purely heuristic. This is a fast path.
4589 * Sometimes when we enter, BSY will be cleared in
4590 * a chk-status or two. If not, the drive is probably seeking
4591 * or something. Snooze for a couple msecs, then
4592 * chk-status again. If still busy, queue delayed work.
4593 */
4594 status = ata_busy_wait(ap, ATA_BUSY, 5);
4595 if (status & ATA_BUSY) {
4596 msleep(2);
4597 status = ata_busy_wait(ap, ATA_BUSY, 10);
4598 if (status & ATA_BUSY) {
31ce6dae 4599 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4600 return;
4601 }
8061f5f0
TH
4602 }
4603
a1af3734
AL
4604 /* move the HSM */
4605 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4606
a1af3734
AL
4607 /* another command or interrupt handler
4608 * may be running at this point.
4609 */
4610 if (poll_next)
7fb6ec28 4611 goto fsm_start;
8061f5f0
TH
4612}
4613
1da177e4
LT
4614/**
4615 * ata_qc_new - Request an available ATA command, for queueing
4616 * @ap: Port from which we request an available command structure
4618 *
4619 * LOCKING:
0cba632b 4620 * None.
1da177e4
LT
4621 */
4622
4623static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4624{
4625 struct ata_queued_cmd *qc = NULL;
4626 unsigned int i;
4627
e3180499 4628 /* no command while frozen */
b51e9e5d 4629 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4630 return NULL;
4631
2ab7db1f
TH
4632 /* the last tag is reserved for internal command. */
4633 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4634 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4635 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4636 break;
4637 }
4638
4639 if (qc)
4640 qc->tag = i;
4641
4642 return qc;
4643}
4644
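/*
 * A standalone sketch (not part of libata) of the tag allocation loop
 * in ata_qc_new() above, minus the atomicity that test_and_set_bit()
 * provides.  The highest tag stays reserved for the internal command.
 * Hypothetical names.
 */
#include <stdint.h>

#define SKETCH_MAX_QUEUE 32

/* Returns the allocated tag, or -1 when all regular tags are in use. */
static int sketch_alloc_tag(uint32_t *allocated)
{
	int i;

	for (i = 0; i < SKETCH_MAX_QUEUE - 1; i++) {
		if (!(*allocated & (UINT32_C(1) << i))) {
			*allocated |= UINT32_C(1) << i;	/* not atomic here */
			return i;
		}
	}
	return -1;
}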
4645/**
4646 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4647 * @dev: Device from whom we request an available command structure
4648 *
4649 * LOCKING:
0cba632b 4650 * None.
1da177e4
LT
4651 */
4652
3373efd8 4653struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4654{
3373efd8 4655 struct ata_port *ap = dev->ap;
1da177e4
LT
4656 struct ata_queued_cmd *qc;
4657
4658 qc = ata_qc_new(ap);
4659 if (qc) {
1da177e4
LT
4660 qc->scsicmd = NULL;
4661 qc->ap = ap;
4662 qc->dev = dev;
1da177e4 4663
2c13b7ce 4664 ata_qc_reinit(qc);
1da177e4
LT
4665 }
4666
4667 return qc;
4668}
4669
1da177e4
LT
4670/**
4671 * ata_qc_free - free unused ata_queued_cmd
4672 * @qc: Command to complete
4673 *
4674 * Designed to free unused ata_queued_cmd object
4675 * in case something prevents using it.
4676 *
4677 * LOCKING:
cca3974e 4678 * spin_lock_irqsave(host lock)
1da177e4
LT
4679 */
4680void ata_qc_free(struct ata_queued_cmd *qc)
4681{
4ba946e9
TH
4682 struct ata_port *ap = qc->ap;
4683 unsigned int tag;
4684
a4631474 4685 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4686
4ba946e9
TH
4687 qc->flags = 0;
4688 tag = qc->tag;
4689 if (likely(ata_tag_valid(tag))) {
4ba946e9 4690 qc->tag = ATA_TAG_POISON;
6cec4a39 4691 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4692 }
1da177e4
LT
4693}
4694
76014427 4695void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4696{
dedaf2b0
TH
4697 struct ata_port *ap = qc->ap;
4698
a4631474
TH
4699 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4700 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4701
4702 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4703 ata_sg_clean(qc);
4704
7401abf2 4705 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4706 if (qc->tf.protocol == ATA_PROT_NCQ)
4707 ap->sactive &= ~(1 << qc->tag);
4708 else
4709 ap->active_tag = ATA_TAG_POISON;
7401abf2 4710
3f3791d3
AL
4711 /* atapi: mark qc as inactive to prevent the interrupt handler
4712 * from completing the command twice later, before the error handler
4713 * is called. (when rc != 0 and atapi request sense is needed)
4714 */
4715 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4716 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4717
1da177e4 4718 /* call completion callback */
77853bf2 4719 qc->complete_fn(qc);
1da177e4
LT
4720}
4721
39599a53
TH
4722static void fill_result_tf(struct ata_queued_cmd *qc)
4723{
4724 struct ata_port *ap = qc->ap;
4725
39599a53 4726 qc->result_tf.flags = qc->tf.flags;
4742d54f 4727 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
4728}
4729
f686bcb8
TH
4730/**
4731 * ata_qc_complete - Complete an active ATA command
4732 * @qc: Command to complete
4734 *
4735 * Indicate to the mid and upper layers that an ATA
4736 * command has completed, with either an ok or not-ok status.
4737 *
4738 * LOCKING:
cca3974e 4739 * spin_lock_irqsave(host lock)
f686bcb8
TH
4740 */
4741void ata_qc_complete(struct ata_queued_cmd *qc)
4742{
4743 struct ata_port *ap = qc->ap;
4744
4745 /* XXX: New EH and old EH use different mechanisms to
4746 * synchronize EH with regular execution path.
4747 *
4748 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4749 * Normal execution path is responsible for not accessing a
4750 * failed qc. libata core enforces the rule by returning NULL
4751 * from ata_qc_from_tag() for failed qcs.
4752 *
4753 * Old EH depends on ata_qc_complete() nullifying completion
4754 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4755 * not synchronize with interrupt handler. Only PIO task is
4756 * taken care of.
4757 */
4758 if (ap->ops->error_handler) {
b51e9e5d 4759 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4760
4761 if (unlikely(qc->err_mask))
4762 qc->flags |= ATA_QCFLAG_FAILED;
4763
4764 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4765 if (!ata_tag_internal(qc->tag)) {
4766 /* always fill result TF for failed qc */
39599a53 4767 fill_result_tf(qc);
f686bcb8
TH
4768 ata_qc_schedule_eh(qc);
4769 return;
4770 }
4771 }
4772
4773 /* read result TF if requested */
4774 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4775 fill_result_tf(qc);
f686bcb8
TH
4776
4777 __ata_qc_complete(qc);
4778 } else {
4779 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4780 return;
4781
4782 /* read result TF if failed or requested */
4783 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4784 fill_result_tf(qc);
f686bcb8
TH
4785
4786 __ata_qc_complete(qc);
4787 }
4788}
4789
dedaf2b0
TH
4790/**
4791 * ata_qc_complete_multiple - Complete multiple qcs successfully
4792 * @ap: port in question
4793 * @qc_active: new qc_active mask
4794 * @finish_qc: LLDD callback invoked before completing a qc
4795 *
4796 * Complete in-flight commands. This function is meant to be
4797 * called from low-level driver's interrupt routine to complete
4798 * requests normally. ap->qc_active and @qc_active are compared
4799 * and commands are completed accordingly.
4800 *
4801 * LOCKING:
cca3974e 4802 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4803 *
4804 * RETURNS:
4805 * Number of completed commands on success, -errno otherwise.
4806 */
4807int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4808 void (*finish_qc)(struct ata_queued_cmd *))
4809{
4810 int nr_done = 0;
4811 u32 done_mask;
4812 int i;
4813
4814 done_mask = ap->qc_active ^ qc_active;
4815
4816 if (unlikely(done_mask & qc_active)) {
4817 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4818 "(%08x->%08x)\n", ap->qc_active, qc_active);
4819 return -EINVAL;
4820 }
4821
4822 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4823 struct ata_queued_cmd *qc;
4824
4825 if (!(done_mask & (1 << i)))
4826 continue;
4827
4828 if ((qc = ata_qc_from_tag(ap, i))) {
4829 if (finish_qc)
4830 finish_qc(qc);
4831 ata_qc_complete(qc);
4832 nr_done++;
4833 }
4834 }
4835
4836 return nr_done;
4837}
4838
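/*
 * A standalone sketch (not part of libata) of the completion-mask logic
 * above: with qc_active = 0b1011 (tags 0, 1 and 3 in flight) and a new
 * hardware mask of 0b0001, the XOR gives 0b1010 and tags 1 and 3 get
 * completed.  A tag may only ever clear; a bit present in the new mask
 * but not the old one is an illegal transition.  Hypothetical names.
 */
#include <stdio.h>
#include <stdint.h>

static int sketch_complete_multiple(uint32_t old_active, uint32_t new_active)
{
	uint32_t done_mask = old_active ^ new_active;
	int tag, nr_done = 0;

	if (done_mask & new_active)	/* a bit turned on: illegal */
		return -1;

	for (tag = 0; tag < 32; tag++) {
		if (done_mask & (UINT32_C(1) << tag)) {
			printf("completing tag %d\n", tag);
			nr_done++;
		}
	}
	return nr_done;
}

int main(void)
{
	return sketch_complete_multiple(0xb, 0x1) < 0;
}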
1da177e4
LT
4839static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4840{
4841 struct ata_port *ap = qc->ap;
4842
4843 switch (qc->tf.protocol) {
3dc1d881 4844 case ATA_PROT_NCQ:
1da177e4
LT
4845 case ATA_PROT_DMA:
4846 case ATA_PROT_ATAPI_DMA:
4847 return 1;
4848
4849 case ATA_PROT_ATAPI:
4850 case ATA_PROT_PIO:
1da177e4
LT
4851 if (ap->flags & ATA_FLAG_PIO_DMA)
4852 return 1;
4853
4854 /* fall through */
4855
4856 default:
4857 return 0;
4858 }
4859
4860 /* never reached */
4861}
4862
4863/**
4864 * ata_qc_issue - issue taskfile to device
4865 * @qc: command to issue to device
4866 *
4867 * Prepare an ATA command for submission to the device.
4868 * This includes mapping the data into a DMA-able
4869 * area, filling in the S/G table, and finally
4870 * writing the taskfile to hardware, starting the command.
4871 *
4872 * LOCKING:
cca3974e 4873 * spin_lock_irqsave(host lock)
1da177e4 4874 */
8e0e694a 4875void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4876{
4877 struct ata_port *ap = qc->ap;
4878
dedaf2b0
TH
4879 /* Make sure only one non-NCQ command is outstanding. The
4880 * check is skipped for old EH because it reuses active qc to
4881 * request ATAPI sense.
4882 */
4883 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4884
4885 if (qc->tf.protocol == ATA_PROT_NCQ) {
4886 WARN_ON(ap->sactive & (1 << qc->tag));
4887 ap->sactive |= 1 << qc->tag;
4888 } else {
4889 WARN_ON(ap->sactive);
4890 ap->active_tag = qc->tag;
4891 }
4892
e4a70e76 4893 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4894 ap->qc_active |= 1 << qc->tag;
e4a70e76 4895
1da177e4
LT
4896 if (ata_should_dma_map(qc)) {
4897 if (qc->flags & ATA_QCFLAG_SG) {
4898 if (ata_sg_setup(qc))
8e436af9 4899 goto sg_err;
1da177e4
LT
4900 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4901 if (ata_sg_setup_one(qc))
8e436af9 4902 goto sg_err;
1da177e4
LT
4903 }
4904 } else {
4905 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4906 }
4907
4908 ap->ops->qc_prep(qc);
4909
8e0e694a
TH
4910 qc->err_mask |= ap->ops->qc_issue(qc);
4911 if (unlikely(qc->err_mask))
4912 goto err;
4913 return;
1da177e4 4914
8e436af9
TH
4915sg_err:
4916 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4917 qc->err_mask |= AC_ERR_SYSTEM;
4918err:
4919 ata_qc_complete(qc);
1da177e4
LT
4920}
4921
4922/**
4923 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4924 * @qc: command to issue to device
4925 *
4926 * Using various libata functions and hooks, this function
4927 * starts an ATA command. ATA commands are grouped into
4928 * classes called "protocols", and issuing each type of protocol
4929 * is slightly different.
4930 *
0baab86b
EF
4931 * May be used as the qc_issue() entry in ata_port_operations.
4932 *
1da177e4 4933 * LOCKING:
cca3974e 4934 * spin_lock_irqsave(host lock)
1da177e4
LT
4935 *
4936 * RETURNS:
9a3d9eb0 4937 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4938 */
4939
9a3d9eb0 4940unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4941{
4942 struct ata_port *ap = qc->ap;
4943
e50362ec
AL
4944 /* Use polling pio if the LLD doesn't handle
4945 * interrupt driven pio and atapi CDB interrupt.
4946 */
4947 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4948 switch (qc->tf.protocol) {
4949 case ATA_PROT_PIO:
e3472cbe 4950 case ATA_PROT_NODATA:
e50362ec
AL
4951 case ATA_PROT_ATAPI:
4952 case ATA_PROT_ATAPI_NODATA:
4953 qc->tf.flags |= ATA_TFLAG_POLLING;
4954 break;
4955 case ATA_PROT_ATAPI_DMA:
4956 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4957 /* see ata_dma_blacklisted() */
e50362ec
AL
4958 BUG();
4959 break;
4960 default:
4961 break;
4962 }
4963 }
4964
3d3cca37
TH
4965 /* Some controllers show flaky interrupt behavior after
4966 * setting xfer mode. Use polling instead.
4967 */
4968 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4969 qc->tf.feature == SETFEATURES_XFER) &&
4970 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4971 qc->tf.flags |= ATA_TFLAG_POLLING;
4972
312f7da2 4973 /* select the device */
1da177e4
LT
4974 ata_dev_select(ap, qc->dev->devno, 1, 0);
4975
312f7da2 4976 /* start the command */
1da177e4
LT
4977 switch (qc->tf.protocol) {
4978 case ATA_PROT_NODATA:
312f7da2
AL
4979 if (qc->tf.flags & ATA_TFLAG_POLLING)
4980 ata_qc_set_polling(qc);
4981
e5338254 4982 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4983 ap->hsm_task_state = HSM_ST_LAST;
4984
4985 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4986 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4987
1da177e4
LT
4988 break;
4989
4990 case ATA_PROT_DMA:
587005de 4991 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4992
1da177e4
LT
4993 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4994 ap->ops->bmdma_setup(qc); /* set up bmdma */
4995 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4996 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4997 break;
4998
312f7da2
AL
4999 case ATA_PROT_PIO:
5000 if (qc->tf.flags & ATA_TFLAG_POLLING)
5001 ata_qc_set_polling(qc);
1da177e4 5002
e5338254 5003 ata_tf_to_host(ap, &qc->tf);
312f7da2 5004
54f00389
AL
5005 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5006 /* PIO data out protocol */
5007 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5008 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5009
5010 /* always send first data block using
e27486db 5011 * the ata_pio_task() codepath.
54f00389 5012 */
312f7da2 5013 } else {
54f00389
AL
5014 /* PIO data in protocol */
5015 ap->hsm_task_state = HSM_ST;
5016
5017 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5018 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5019
5020 /* if polling, ata_pio_task() handles the rest.
5021 * otherwise, interrupt handler takes over from here.
5022 */
312f7da2
AL
5023 }
5024
1da177e4
LT
5025 break;
5026
1da177e4 5027 case ATA_PROT_ATAPI:
1da177e4 5028 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5029 if (qc->tf.flags & ATA_TFLAG_POLLING)
5030 ata_qc_set_polling(qc);
5031
e5338254 5032 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5033
312f7da2
AL
5034 ap->hsm_task_state = HSM_ST_FIRST;
5035
5036 /* send cdb by polling if no cdb interrupt */
5037 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5038 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5039 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5040 break;
5041
5042 case ATA_PROT_ATAPI_DMA:
587005de 5043 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5044
1da177e4
LT
5045 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5046 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5047 ap->hsm_task_state = HSM_ST_FIRST;
5048
5049 /* send cdb by polling if no cdb interrupt */
5050 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5051 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5052 break;
5053
5054 default:
5055 WARN_ON(1);
9a3d9eb0 5056 return AC_ERR_SYSTEM;
1da177e4
LT
5057 }
5058
5059 return 0;
5060}
5061
1da177e4
LT
5062/**
5063 * ata_host_intr - Handle host interrupt for given (port, task)
5064 * @ap: Port on which interrupt arrived (possibly...)
5065 * @qc: Taskfile currently active in engine
5066 *
5067 * Handle host interrupt for given queued command. Depending on
5068 * hsm_task_state this covers DMA completion and interrupt-driven
5069 * PIO/ATAPI; polled commands run with interrupts disabled (nIEN bit).
5070 *
5071 * LOCKING:
cca3974e 5072 * spin_lock_irqsave(host lock)
1da177e4
LT
5073 *
5074 * RETURNS:
5075 * One if interrupt was handled, zero if not (shared irq).
5076 */
5077
5078inline unsigned int ata_host_intr (struct ata_port *ap,
5079 struct ata_queued_cmd *qc)
5080{
ea54763f 5081 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5082 u8 status, host_stat = 0;
1da177e4 5083
312f7da2 5084 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5085 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5086
312f7da2
AL
5087 /* Check whether we are expecting interrupt in this state */
5088 switch (ap->hsm_task_state) {
5089 case HSM_ST_FIRST:
6912ccd5
AL
5090 /* Some pre-ATAPI-4 devices assert INTRQ
5091 * at this state when ready to receive CDB.
5092 */
1da177e4 5093
312f7da2
AL
5094 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5095 * The flag was turned on only for atapi devices.
5096 * No need to check is_atapi_taskfile(&qc->tf) again.
5097 */
5098 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5099 goto idle_irq;
1da177e4 5100 break;
312f7da2
AL
5101 case HSM_ST_LAST:
5102 if (qc->tf.protocol == ATA_PROT_DMA ||
5103 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5104 /* check status of DMA engine */
5105 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5106 VPRINTK("ata%u: host_stat 0x%X\n",
5107 ap->print_id, host_stat);
312f7da2
AL
5108
5109 /* if it's not our irq... */
5110 if (!(host_stat & ATA_DMA_INTR))
5111 goto idle_irq;
5112
5113 /* before we do anything else, clear DMA-Start bit */
5114 ap->ops->bmdma_stop(qc);
a4f16610
AL
5115
5116 if (unlikely(host_stat & ATA_DMA_ERR)) {
5117 			/* error when transferring data to/from memory */
5118 qc->err_mask |= AC_ERR_HOST_BUS;
5119 ap->hsm_task_state = HSM_ST_ERR;
5120 }
312f7da2
AL
5121 }
5122 break;
5123 case HSM_ST:
5124 break;
1da177e4
LT
5125 default:
5126 goto idle_irq;
5127 }
5128
312f7da2
AL
5129 /* check altstatus */
5130 status = ata_altstatus(ap);
5131 if (status & ATA_BUSY)
5132 goto idle_irq;
1da177e4 5133
312f7da2
AL
5134 /* check main status, clearing INTRQ */
5135 status = ata_chk_status(ap);
5136 if (unlikely(status & ATA_BUSY))
5137 goto idle_irq;
1da177e4 5138
312f7da2
AL
5139 /* ack bmdma irq events */
5140 ap->ops->irq_clear(ap);
1da177e4 5141
bb5cb290 5142 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5143
5144 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5145 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5146 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5147
1da177e4
LT
5148 return 1; /* irq handled */
5149
5150idle_irq:
5151 ap->stats.idle_irq++;
5152
5153#ifdef ATA_IRQ_TRAP
5154 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5155 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5156 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5157 return 1;
1da177e4
LT
5158 }
5159#endif
5160 return 0; /* irq not handled */
5161}
5162
5163/**
5164 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5165 * @irq: irq line (unused)
cca3974e 5166 * @dev_instance: pointer to our ata_host information structure
1da177e4 5167 *
0cba632b
JG
5168 * Default interrupt handler for PCI IDE devices. Calls
5169 * ata_host_intr() for each port that is not disabled.
5170 *
1da177e4 5171 * LOCKING:
cca3974e 5172 * Obtains host lock during operation.
1da177e4
LT
5173 *
5174 * RETURNS:
0cba632b 5175 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5176 */
5177
7d12e780 5178irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5179{
cca3974e 5180 struct ata_host *host = dev_instance;
1da177e4
LT
5181 unsigned int i;
5182 unsigned int handled = 0;
5183 unsigned long flags;
5184
5185 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5186 spin_lock_irqsave(&host->lock, flags);
1da177e4 5187
cca3974e 5188 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5189 struct ata_port *ap;
5190
cca3974e 5191 ap = host->ports[i];
c1389503 5192 if (ap &&
029f5468 5193 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5194 struct ata_queued_cmd *qc;
5195
5196 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5197 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5198 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5199 handled |= ata_host_intr(ap, qc);
5200 }
5201 }
5202
cca3974e 5203 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5204
5205 return IRQ_RETVAL(handled);
5206}
5207
34bf2170
TH
5208/**
5209 * sata_scr_valid - test whether SCRs are accessible
5210 * @ap: ATA port to test SCR accessibility for
5211 *
5212 * Test whether SCRs are accessible for @ap.
5213 *
5214 * LOCKING:
5215 * None.
5216 *
5217 * RETURNS:
5218 * 1 if SCRs are accessible, 0 otherwise.
5219 */
5220int sata_scr_valid(struct ata_port *ap)
5221{
5222 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5223}
5224
5225/**
5226 * sata_scr_read - read SCR register of the specified port
5227 * @ap: ATA port to read SCR for
5228 * @reg: SCR to read
5229 * @val: Place to store read value
5230 *
5231 * Read SCR register @reg of @ap into *@val. This function is
5232 * guaranteed to succeed if the cable type of the port is SATA
5233 * and the port implements ->scr_read.
5234 *
5235 * LOCKING:
5236 * None.
5237 *
5238 * RETURNS:
5239 * 0 on success, negative errno on failure.
5240 */
5241int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5242{
5243 if (sata_scr_valid(ap)) {
5244 *val = ap->ops->scr_read(ap, reg);
5245 return 0;
5246 }
5247 return -EOPNOTSUPP;
5248}
5249
5250/**
5251 * sata_scr_write - write SCR register of the specified port
5252 * @ap: ATA port to write SCR for
5253 * @reg: SCR to write
5254 * @val: value to write
5255 *
5256 * Write @val to SCR register @reg of @ap. This function is
5257 * guaranteed to succeed if the cable type of the port is SATA
5258 * and the port implements ->scr_read.
5259 *
5260 * LOCKING:
5261 * None.
5262 *
5263 * RETURNS:
5264 * 0 on success, negative errno on failure.
5265 */
5266int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5267{
5268 if (sata_scr_valid(ap)) {
5269 ap->ops->scr_write(ap, reg, val);
5270 return 0;
5271 }
5272 return -EOPNOTSUPP;
5273}
5274
5275/**
5276 * sata_scr_write_flush - write SCR register of the specified port and flush
5277 * @ap: ATA port to write SCR for
5278 * @reg: SCR to write
5279 * @val: value to write
5280 *
5281 * This function is identical to sata_scr_write() except that this
5282 * function performs flush after writing to the register.
5283 *
5284 * LOCKING:
5285 * None.
5286 *
5287 * RETURNS:
5288 * 0 on success, negative errno on failure.
5289 */
5290int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5291{
5292 if (sata_scr_valid(ap)) {
5293 ap->ops->scr_write(ap, reg, val);
5294 ap->ops->scr_read(ap, reg);
5295 return 0;
5296 }
5297 return -EOPNOTSUPP;
5298}
5299
5300/**
5301 * ata_port_online - test whether the given port is online
5302 * @ap: ATA port to test
5303 *
5304 * Test whether @ap is online. Note that this function returns 0
5305 * if online status of @ap cannot be obtained, so
5306 * ata_port_online(ap) != !ata_port_offline(ap).
5307 *
5308 * LOCKING:
5309 * None.
5310 *
5311 * RETURNS:
5312 * 1 if the port online status is available and online.
5313 */
5314int ata_port_online(struct ata_port *ap)
5315{
5316 u32 sstatus;
5317
5318 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5319 return 1;
5320 return 0;
5321}
5322
5323/**
5324 * ata_port_offline - test whether the given port is offline
5325 * @ap: ATA port to test
5326 *
5327 * Test whether @ap is offline. Note that this function returns
5328 * 0 if offline status of @ap cannot be obtained, so
5329 * ata_port_online(ap) != !ata_port_offline(ap).
5330 *
5331 * LOCKING:
5332 * None.
5333 *
5334 * RETURNS:
5335 * 1 if the port offline status is available and offline.
5336 */
5337int ata_port_offline(struct ata_port *ap)
5338{
5339 u32 sstatus;
5340
5341 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5342 return 1;
5343 return 0;
5344}
0baab86b 5345
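/*
 * A minimal standalone sketch (not part of libata) of the SStatus test
 * used by ata_port_online()/ata_port_offline() above: DET occupies
 * bits 3:0 and reads 0x3 when a device is present with phy
 * communication established; SPD in bits 7:4 reports the negotiated
 * interface speed.  Hypothetical names.
 */
#include <stdint.h>

static int sketch_link_online(uint32_t sstatus)
{
	return (sstatus & 0xf) == 0x3;		/* DET = 3: device + phy up */
}

static unsigned int sketch_link_speed(uint32_t sstatus)
{
	return (sstatus >> 4) & 0xf;		/* 1 = 1.5 Gbps, 2 = 3.0 Gbps */
}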
77b08fb5 5346int ata_flush_cache(struct ata_device *dev)
9b847548 5347{
977e6b9f 5348 unsigned int err_mask;
9b847548
JA
5349 u8 cmd;
5350
5351 if (!ata_try_flush_cache(dev))
5352 return 0;
5353
6fc49adb 5354 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5355 cmd = ATA_CMD_FLUSH_EXT;
5356 else
5357 cmd = ATA_CMD_FLUSH;
5358
977e6b9f
TH
5359 err_mask = ata_do_simple_cmd(dev, cmd);
5360 if (err_mask) {
5361 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5362 return -EIO;
5363 }
5364
5365 return 0;
9b847548
JA
5366}
5367
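/*
 * A tiny standalone sketch (not part of libata) of the opcode choice in
 * ata_flush_cache() above: devices flagged for the 48-bit flush use
 * FLUSH CACHE EXT (0xEA), everything else uses FLUSH CACHE (0xE7).
 * Hypothetical names.
 */
#include <stdint.h>

#define SKETCH_CMD_FLUSH	0xE7u
#define SKETCH_CMD_FLUSH_EXT	0xEAu

static uint8_t sketch_flush_opcode(int has_flush_ext)
{
	return has_flush_ext ? SKETCH_CMD_FLUSH_EXT : SKETCH_CMD_FLUSH;
}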
6ffa01d8 5368#ifdef CONFIG_PM
cca3974e
JG
5369static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5370 unsigned int action, unsigned int ehi_flags,
5371 int wait)
500530f6
TH
5372{
5373 unsigned long flags;
5374 int i, rc;
5375
cca3974e
JG
5376 for (i = 0; i < host->n_ports; i++) {
5377 struct ata_port *ap = host->ports[i];
500530f6
TH
5378
5379 /* Previous resume operation might still be in
5380 * progress. Wait for PM_PENDING to clear.
5381 */
5382 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5383 ata_port_wait_eh(ap);
5384 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5385 }
5386
5387 /* request PM ops to EH */
5388 spin_lock_irqsave(ap->lock, flags);
5389
5390 ap->pm_mesg = mesg;
5391 if (wait) {
5392 rc = 0;
5393 ap->pm_result = &rc;
5394 }
5395
5396 ap->pflags |= ATA_PFLAG_PM_PENDING;
5397 ap->eh_info.action |= action;
5398 ap->eh_info.flags |= ehi_flags;
5399
5400 ata_port_schedule_eh(ap);
5401
5402 spin_unlock_irqrestore(ap->lock, flags);
5403
5404 /* wait and check result */
5405 if (wait) {
5406 ata_port_wait_eh(ap);
5407 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5408 if (rc)
5409 return rc;
5410 }
5411 }
5412
5413 return 0;
5414}
5415
5416/**
cca3974e
JG
5417 * ata_host_suspend - suspend host
5418 * @host: host to suspend
500530f6
TH
5419 * @mesg: PM message
5420 *
cca3974e 5421 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5422 * function requests EH to perform PM operations and waits for EH
5423 * to finish.
5424 *
5425 * LOCKING:
5426 * Kernel thread context (may sleep).
5427 *
5428 * RETURNS:
5429 * 0 on success, -errno on failure.
5430 */
cca3974e 5431int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5432{
5433 int i, j, rc;
5434
cca3974e 5435 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5436 if (rc)
5437 goto fail;
5438
5439 /* EH is quiescent now. Fail if we have any ready device.
5440 * This happens if hotplug occurs between completion of device
5441 * suspension and here.
5442 */
cca3974e
JG
5443 for (i = 0; i < host->n_ports; i++) {
5444 struct ata_port *ap = host->ports[i];
500530f6
TH
5445
5446 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5447 struct ata_device *dev = &ap->device[j];
5448
5449 if (ata_dev_ready(dev)) {
5450 ata_port_printk(ap, KERN_WARNING,
5451 "suspend failed, device %d "
5452 "still active\n", dev->devno);
5453 rc = -EBUSY;
5454 goto fail;
5455 }
5456 }
5457 }
5458
cca3974e 5459 host->dev->power.power_state = mesg;
500530f6 5460 return 0;
5461
5462 fail:
cca3974e 5463 ata_host_resume(host);
500530f6 5464 return rc;
5465}
5466
5467/**
cca3974e 5468 * ata_host_resume - resume host
5469 * @host: host to resume
500530f6 5470 *
cca3974e 5471 * Resume @host. Actual operation is performed by EH. This
500530f6 5472 * function requests EH to perform PM operations and returns.
5473 * Note that all resume operations are performed in parallel.
5474 *
5475 * LOCKING:
5476 * Kernel thread context (may sleep).
5477 */
cca3974e 5478void ata_host_resume(struct ata_host *host)
500530f6 5479{
cca3974e 5480 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5481 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5482 host->dev->power.power_state = PMSG_ON;
500530f6 5483}
6ffa01d8 5484 #endif
500530f6 5485
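/*
 * Example (sketch assuming a platform-bus LLD and <linux/platform_device.h>;
 * all foo_* names are hypothetical): non-PCI bus glue builds its
 * suspend/resume hooks on the two helpers above, just as
 * ata_pci_device_suspend() and ata_pci_device_resume() do for PCI
 * further down in this file.
 */
#ifdef CONFIG_PM
static int foo_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return ata_host_suspend(host, mesg);
}

static int foo_resume(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_host_resume(host);
	return 0;
}
#endif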
c893a3ae 5486 /**
5487 * ata_port_start - Set port up for dma.
5488 * @ap: Port to initialize
5489 *
5490 * Called just after data structures for each port are
5491 * initialized. Allocates space for PRD table.
5492 *
5493 * May be used as the port_start() entry in ata_port_operations.
5494 *
5495 * LOCKING:
5496 * Inherited from caller.
5497 */
f0d36efd 5498int ata_port_start(struct ata_port *ap)
1da177e4 5499{
2f1f610b 5500 struct device *dev = ap->dev;
6037d6bb 5501 int rc;
1da177e4 5502
f0d36efd 5503 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5504 GFP_KERNEL);
1da177e4 5505 if (!ap->prd)
5506 return -ENOMEM;
5507
6037d6bb 5508 rc = ata_pad_alloc(ap, dev);
f0d36efd 5509 if (rc)
6037d6bb 5510 return rc;
1da177e4 5511
f0d36efd 5512 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5513 (unsigned long long)ap->prd_dma);
1da177e4 5514 return 0;
5515}
5516
3ef3b43d 5517 /**
5518 * ata_dev_init - Initialize an ata_device structure
5519 * @dev: Device structure to initialize
5520 *
5521 * Initialize @dev in preparation for probing.
5522 *
5523 * LOCKING:
5524 * Inherited from caller.
5525 */
5526void ata_dev_init(struct ata_device *dev)
5527{
5528 struct ata_port *ap = dev->ap;
72fa4b74 5529 unsigned long flags;
5530
5a04bf4b 5531 /* SATA spd limit is bound to the first device */
5532 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5533
72fa4b74 5534 /* High bits of dev->flags are used to record warm plug
5535 * requests which occur asynchronously. Synchronize using
cca3974e 5536 * host lock.
72fa4b74 5537 */
ba6a1308 5538 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5539 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5540 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5541
72fa4b74 5542 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5543 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d 5544 dev->pio_mask = UINT_MAX;
5545 dev->mwdma_mask = UINT_MAX;
5546 dev->udma_mask = UINT_MAX;
5547}
5548
1da177e4 5549/**
155a8a9c 5550 * ata_port_init - Initialize an ata_port structure
1da177e4 5551 * @ap: Structure to initialize
cca3974e 5552 * @host: Collection of hosts to which @ap belongs
1da177e4 5553 * @ent: Probe information provided by low-level driver
5554 * @port_no: Port number associated with this ata_port
5555 *
155a8a9c 5556 * Initialize a new ata_port structure.
0cba632b 5557 *
1da177e4 5558 * LOCKING:
0cba632b 5559 * Inherited from caller.
1da177e4 5560 */
cca3974e 5561void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5562 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4 5563 {
5564 unsigned int i;
5565
cca3974e 5566 ap->lock = &host->lock;
198e0fed 5567 ap->flags = ATA_FLAG_DISABLED;
44877b4e 5568 ap->print_id = ata_print_id++;
1da177e4 5569 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5570 ap->host = host;
2f1f610b 5571 ap->dev = ent->dev;
1da177e4 5572 ap->port_no = port_no;
fea63e38 5573 if (port_no == 1 && ent->pinfo2) {
5574 ap->pio_mask = ent->pinfo2->pio_mask;
5575 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5576 ap->udma_mask = ent->pinfo2->udma_mask;
5577 ap->flags |= ent->pinfo2->flags;
5578 ap->ops = ent->pinfo2->port_ops;
5579 } else {
5580 ap->pio_mask = ent->pio_mask;
5581 ap->mwdma_mask = ent->mwdma_mask;
5582 ap->udma_mask = ent->udma_mask;
5583 ap->flags |= ent->port_flags;
5584 ap->ops = ent->port_ops;
5585 }
5a04bf4b 5586 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4 5587 ap->active_tag = ATA_TAG_POISON;
5588 ap->last_ctl = 0xFF;
bd5d825c 5589
5590 #if defined(ATA_VERBOSE_DEBUG)
5591 /* turn on all debugging levels */
5592 ap->msg_enable = 0x00FF;
5593 #elif defined(ATA_DEBUG)
5594 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5595 #else
0dd4b21f 5596 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5597 #endif
1da177e4 5598
65f27f38 5599 INIT_DELAYED_WORK(&ap->port_task, NULL);
5600 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5601 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5602 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5603 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5604
838df628 5605 /* set cable type */
5606 ap->cbl = ATA_CBL_NONE;
5607 if (ap->flags & ATA_FLAG_SATA)
5608 ap->cbl = ATA_CBL_SATA;
5609
acf356b1 5610 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5611 struct ata_device *dev = &ap->device[i];
38d87234 5612 dev->ap = ap;
72fa4b74 5613 dev->devno = i;
3ef3b43d 5614 ata_dev_init(dev);
acf356b1 5615 }
1da177e4 5616
5617 #ifdef ATA_IRQ_TRAP
5618 ap->stats.unhandled_irq = 1;
5619 ap->stats.idle_irq = 1;
5620 #endif
5621
5622 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5623}
5624
155a8a9c 5625/**
4608c160 5626 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5627 * @ap: ATA port to initialize SCSI host for
5628 * @shost: SCSI host associated with @ap
155a8a9c 5629 *
4608c160 5630 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c 5631 *
5632 * LOCKING:
5633 * Inherited from caller.
5634 */
4608c160 5635static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5636{
cca3974e 5637 ap->scsi_host = shost;
155a8a9c 5638
44877b4e 5639 shost->unique_id = ap->print_id;
4608c160 5640 shost->max_id = 16;
5641 shost->max_lun = 1;
5642 shost->max_channel = 1;
5643 shost->max_cmd_len = 12;
155a8a9c 5644 }
5645
1da177e4 5646/**
996139f1 5647 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5648 * @ent: Information provided by low-level driver
cca3974e 5649 * @host: Collections of ports to which we add
1da177e4 5650 * @port_no: Port number associated with this host
5651 *
0cba632b 5652 * Attach low-level ATA driver to system.
5653 *
1da177e4 5654 * LOCKING:
0cba632b 5655 * PCI/etc. bus probe sem.
1da177e4 5656 *
5657 * RETURNS:
0cba632b 5658 * New ata_port on success, or NULL on error.
1da177e4 5659 */
996139f1 5660static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5661 struct ata_host *host,
1da177e4 5662 unsigned int port_no)
5663{
996139f1 5664 struct Scsi_Host *shost;
1da177e4 5665 struct ata_port *ap;
1da177e4 5666
5667 DPRINTK("ENTER\n");
aec5c3c1 5668
52783c5d 5669 if (!ent->port_ops->error_handler &&
cca3974e 5670 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1 5671 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5672 port_no);
5673 return NULL;
5674 }
5675
996139f1 5676 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5677 if (!shost)
1da177e4 5678 return NULL;
5679
996139f1 5680 shost->transportt = &ata_scsi_transport_template;
30afc84c 5681
996139f1 5682 ap = ata_shost_to_port(shost);
1da177e4 5683
cca3974e 5684 ata_port_init(ap, host, ent, port_no);
996139f1 5685 ata_port_init_shost(ap, shost);
1da177e4 5686
1da177e4 5687 return ap;
1da177e4 5688 }
5689
f0d36efd 5690 static void ata_host_release(struct device *gendev, void *res)
5691{
5692 struct ata_host *host = dev_get_drvdata(gendev);
5693 int i;
5694
5695 for (i = 0; i < host->n_ports; i++) {
5696 struct ata_port *ap = host->ports[i];
5697
1aa506e4 5698 if (ap && ap->ops->port_stop)
f0d36efd 5699 ap->ops->port_stop(ap);
f0d36efd 5700 }
5701
5702 if (host->ops->host_stop)
5703 host->ops->host_stop(host);
1aa56cca 5704
1aa506e4 5705 for (i = 0; i < host->n_ports; i++) {
5706 struct ata_port *ap = host->ports[i];
5707
5708 if (ap)
5709 scsi_host_put(ap->scsi_host);
5710
5711 host->ports[i] = NULL;
5712 }
5713
1aa56cca 5714 dev_set_drvdata(gendev, NULL);
f0d36efd 5715 }
5716
b03732f0 5717/**
cca3974e 5718 * ata_host_init - Initialize a host struct
5719 * @host: host to initialize
5720 * @dev: device host is attached to
5721 * @flags: host flags
5722 * @ops: port_ops
b03732f0 5723 *
5724 * LOCKING:
5725 * PCI/etc. bus probe sem.
5726 *
5727 */
5728
cca3974e 5729 void ata_host_init(struct ata_host *host, struct device *dev,
5730 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5731 {
cca3974e 5732 spin_lock_init(&host->lock);
5733 host->dev = dev;
5734 host->flags = flags;
5735 host->ops = ops;
b03732f0 5736 }
5737
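/*
 * Example (sketch; foo_sas_host_setup is hypothetical): SAS LLDs that
 * embed libata call ata_host_init() directly on a host they own instead
 * of going through ata_device_add().  The flags argument takes host
 * flags (e.g. 0 or ATA_HOST_SIMPLEX), not port flags.
 */
static void foo_sas_host_setup(struct ata_host *host, struct device *dev,
			       const struct ata_port_operations *ops)
{
	ata_host_init(host, dev, 0, ops);
}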
1da177e4 5738/**
0cba632b 5739 * ata_device_add - Register hardware device with ATA and SCSI layers
5740 * @ent: Probe information describing hardware device to be registered
5741 *
5742 * This function processes the information provided in the probe
5743 * information struct @ent, allocates the necessary ATA and SCSI
5744 * host information structures, initializes them, and registers
5745 * everything with requisite kernel subsystems.
5746 *
5747 * This function requests irqs, probes the ATA bus, and probes
5748 * the SCSI bus.
1da177e4 5749 *
5750 * LOCKING:
0cba632b 5751 * PCI/etc. bus probe sem.
1da177e4 5752 *
5753 * RETURNS:
0cba632b 5754 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5755 */
057ace5e 5756int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5757{
6d0500df 5758 unsigned int i;
1da177e4 5759 struct device *dev = ent->dev;
cca3974e 5760 struct ata_host *host;
39b07ce6 5761 int rc;
1da177e4 5762
5763 DPRINTK("ENTER\n");
f20b16ff 5764
02f076aa 5765 if (ent->irq == 0) {
5766 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5767 return 0;
5768 }
f0d36efd 5769
5770 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5771 return 0;
5772
1da177e4 5773 /* alloc a container for our list of ATA ports (buses) */
f0d36efd 5774 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5775 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5776 if (!host)
f0d36efd 5777 goto err_out;
5778 devres_add(dev, host);
5779 dev_set_drvdata(dev, host);
1da177e4 5780
cca3974e 5781 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5782 host->n_ports = ent->n_ports;
5783 host->irq = ent->irq;
5784 host->irq2 = ent->irq2;
0d5ff566 5785 host->iomap = ent->iomap;
cca3974e 5786 host->private_data = ent->private_data;
1da177e4 5787
5788 /* register each port bound to this device */
cca3974e 5789 for (i = 0; i < host->n_ports; i++) {
1da177e4 5790 struct ata_port *ap;
5791 unsigned long xfer_mode_mask;
2ec7df04 5792 int irq_line = ent->irq;
1da177e4 5793
cca3974e 5794 ap = ata_port_add(ent, host, i);
c38778c3 5795 host->ports[i] = ap;
1da177e4 5796 if (!ap)
5797 goto err_out;
5798
dd5b06c4 5799 /* dummy? */
5800 if (ent->dummy_port_mask & (1 << i)) {
5801 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5802 ap->ops = &ata_dummy_port_ops;
5803 continue;
5804 }
5805
5806 /* start port */
5807 rc = ap->ops->port_start(ap);
5808 if (rc) {
cca3974e 5809 host->ports[i] = NULL;
5810 scsi_host_put(ap->scsi_host);
dd5b06c4 5811 goto err_out;
5812 }
5813
2ec7df04 5814 /* Report the secondary IRQ for second channel legacy */
5815 if (i == 1 && ent->irq2)
5816 irq_line = ent->irq2;
5817
1da177e4 5818 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5819 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5820 (ap->pio_mask << ATA_SHIFT_PIO);
5821
5822 /* print per-port info to dmesg */
0d5ff566 5823 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5824 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf 5825 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5826 ata_mode_string(xfer_mode_mask),
5827 ap->ioaddr.cmd_addr,
5828 ap->ioaddr.ctl_addr,
5829 ap->ioaddr.bmdma_addr,
2ec7df04 5830 irq_line);
1da177e4 5831
0f0a3ad3 5832 /* freeze port before requesting IRQ */
5833 ata_eh_freeze_port(ap);
1da177e4 5834 }
5835
2ec7df04 5836 /* obtain irq, that may be shared between channels */
f0d36efd 5837 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5838 ent->irq_flags, DRV_NAME, host);
39b07ce6 5839 if (rc) {
5840 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5841 ent->irq, rc);
1da177e4 5842 goto err_out;
39b07ce6 5843 }
1da177e4 5844
2ec7df04 5845 /* do we have a second IRQ for the other channel, e.g. legacy mode */
5846 if (ent->irq2) {
5847 /* We will get weird core code crashes later if this is true
5848 so trap it now */
5849 BUG_ON(ent->irq == ent->irq2);
5850
f0d36efd 5851 rc = devm_request_irq(dev, ent->irq2,
5852 ent->port_ops->irq_handler, ent->irq_flags,
5853 DRV_NAME, host);
2ec7df04 5854 if (rc) {
5855 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5856 ent->irq2, rc);
f0d36efd 5857 goto err_out;
2ec7df04 5858 }
5859 }
5860
f0d36efd 5861 /* resource acquisition complete */
b878ca5d 5862 devres_remove_group(dev, ata_device_add);
f0d36efd 5863
1da177e4
LT
5864 /* perform each probe synchronously */
5865 DPRINTK("probe begin\n");
cca3974e 5866 for (i = 0; i < host->n_ports; i++) {
5867 struct ata_port *ap = host->ports[i];
5a04bf4b 5868 u32 scontrol;
1da177e4 5869 int rc;
5870
5a04bf4b 5871 /* init sata_spd_limit to the current value */
5872 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5873 int spd = (scontrol >> 4) & 0xf;
5874 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5875 }
5876 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5877
cca3974e 5878 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5879 if (rc) {
f15a1daf 5880 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4 5881 /* FIXME: do something useful here */
5882 /* FIXME: handle unconditional calls to
5883 * scsi_scan_host and ata_host_remove, below,
5884 * at the very least
5885 */
5886 }
3e706399 5887
52783c5d 5888 if (ap->ops->error_handler) {
1cdaf534 5889 struct ata_eh_info *ehi = &ap->eh_info;
3e706399 5890 unsigned long flags;
5891
5892 ata_port_probe(ap);
5893
5894 /* kick EH for boot probing */
ba6a1308 5895 spin_lock_irqsave(ap->lock, flags);
3e706399 5896
1cdaf534 5897 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5898 ehi->action |= ATA_EH_SOFTRESET;
5899 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5900
b51e9e5d 5901 ap->pflags |= ATA_PFLAG_LOADING;
3e706399 5902 ata_port_schedule_eh(ap);
5903
ba6a1308 5904 spin_unlock_irqrestore(ap->lock, flags);
3e706399 5905
5906 /* wait for EH to finish */
5907 ata_port_wait_eh(ap);
5908 } else {
44877b4e 5909 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
3e706399 5910 rc = ata_bus_probe(ap);
44877b4e 5911 DPRINTK("ata%u: bus probe end\n", ap->print_id);
3e706399 5912
5913 if (rc) {
5914 /* FIXME: do something useful here?
5915 * Current libata behavior will
5916 * tear down everything when
5917 * the module is removed
5918 * or the h/w is unplugged.
5919 */
5920 }
5921 }
1da177e4 5922 }
5923
5924 /* probes are done, now scan each port's disk(s) */
c893a3ae 5925 DPRINTK("host probe begin\n");
cca3974e 5926 for (i = 0; i < host->n_ports; i++) {
5927 struct ata_port *ap = host->ports[i];
1da177e4 5928
644dd0cc 5929 ata_scsi_scan_host(ap);
1da177e4 5930 }
5931
1da177e4 5932 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5933 return ent->n_ports; /* success */
5934
f0d36efd 5935 err_out:
5936 devres_release_group(dev, ata_device_add);
f0d36efd 5937 VPRINTK("EXIT, returning %d\n", rc);
1da177e4 5938 return 0;
5939}
5940
720ba126 5941 /**
5942 * ata_port_detach - Detach ATA port in preparation of device removal
5943 * @ap: ATA port to be detached
5944 *
5945 * Detach all ATA devices and the associated SCSI devices of @ap;
5946 * then, remove the associated SCSI host. @ap is guaranteed to
5947 * be quiescent on return from this function.
5948 *
5949 * LOCKING:
5950 * Kernel thread context (may sleep).
5951 */
5952void ata_port_detach(struct ata_port *ap)
5953{
5954 unsigned long flags;
5955 int i;
5956
5957 if (!ap->ops->error_handler)
c3cf30a9 5958 goto skip_eh;
720ba126 5959
5960 /* tell EH we're leaving & flush EH */
ba6a1308 5961 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5962 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5963 spin_unlock_irqrestore(ap->lock, flags);
720ba126 5964
5965 ata_port_wait_eh(ap);
5966
5967 /* EH is now guaranteed to see UNLOADING, so no new device
5968 * will be attached. Disable all existing devices.
5969 */
ba6a1308 5970 spin_lock_irqsave(ap->lock, flags);
720ba126 5971
5972 for (i = 0; i < ATA_MAX_DEVICES; i++)
5973 ata_dev_disable(&ap->device[i]);
5974
ba6a1308 5975 spin_unlock_irqrestore(ap->lock, flags);
720ba126 5976
5977 /* Final freeze & EH. All in-flight commands are aborted. EH
5978 * will be skipped and retries will be terminated with bad
5979 * target.
5980 */
ba6a1308 5981 spin_lock_irqsave(ap->lock, flags);
720ba126 5982 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5983 spin_unlock_irqrestore(ap->lock, flags);
720ba126 5984
5985 ata_port_wait_eh(ap);
5986
5987 /* Flush hotplug task. The sequence is similar to
5988 * ata_port_flush_task().
5989 */
5990 flush_workqueue(ata_aux_wq);
5991 cancel_delayed_work(&ap->hotplug_task);
5992 flush_workqueue(ata_aux_wq);
5993
c3cf30a9 5994 skip_eh:
720ba126 5995 /* remove the associated SCSI host */
cca3974e 5996 scsi_remove_host(ap->scsi_host);
720ba126 5997 }
5998
0529c159 5999 /**
6000 * ata_host_detach - Detach all ports of an ATA host
6001 * @host: Host to detach
6002 *
6003 * Detach all ports of @host.
6004 *
6005 * LOCKING:
6006 * Kernel thread context (may sleep).
6007 */
6008void ata_host_detach(struct ata_host *host)
6009{
6010 int i;
6011
6012 for (i = 0; i < host->n_ports; i++)
6013 ata_port_detach(host->ports[i]);
6014}
6015
f6d950e2 6016 struct ata_probe_ent *
6017ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6018{
6019 struct ata_probe_ent *probe_ent;
6020
4d05447e 6021 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
f6d950e2 6022 if (!probe_ent) {
6023 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6024 kobject_name(&(dev->kobj)));
6025 return NULL;
6026 }
6027
6028 INIT_LIST_HEAD(&probe_ent->node);
6029 probe_ent->dev = dev;
6030
6031 probe_ent->sht = port->sht;
cca3974e 6032 probe_ent->port_flags = port->flags;
f6d950e2 6033 probe_ent->pio_mask = port->pio_mask;
6034 probe_ent->mwdma_mask = port->mwdma_mask;
6035 probe_ent->udma_mask = port->udma_mask;
6036 probe_ent->port_ops = port->port_ops;
d639ca94 6037 probe_ent->private_data = port->private_data;
f6d950e2 6038
6039 return probe_ent;
6040}
6041
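/*
 * Example (sketch; foo_port_info, foo_attach and the single-port
 * ioaddr setup are hypothetical): a minimal non-standard LLD fills a
 * probe_ent via the helper above and hands it to ata_device_add().
 * Most PCI drivers reach that path through ata_pci_init_one() instead.
 */
static int foo_attach(struct device *dev, void __iomem *cmd_base, int irq)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(dev, &foo_port_info);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->port[0].cmd_addr = cmd_base;
	ata_std_ports(&probe_ent->port[0]);	/* ctl_addr etc. elided */

	if (!ata_device_add(probe_ent))
		return -ENODEV;		/* 0 ports registered */

	devm_kfree(dev, probe_ent);	/* no longer needed after add */
	return 0;
}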
1da177e4 6042 /**
6043 * ata_std_ports - initialize ioaddr with standard port offsets.
6044 * @ioaddr: IO address structure to be initialized
0baab86b 6045 *
6046 * Utility function which initializes data_addr, error_addr,
6047 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6048 * device_addr, status_addr, and command_addr to standard offsets
6049 * relative to cmd_addr.
6050 *
6051 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6052 */
0baab86b 6053
1da177e4 6054 void ata_std_ports(struct ata_ioports *ioaddr)
6055{
6056 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6057 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6058 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6059 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6060 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6061 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6062 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6063 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6064 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6065 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6066}
6067
0baab86b 6068
374b1873 6069 #ifdef CONFIG_PCI
6070
1da177e4 6071 /**
6072 * ata_pci_remove_one - PCI layer callback for device removal
6073 * @pdev: PCI device that was removed
6074 *
b878ca5d 6075 * PCI layer indicates to libata via this hook that hot-unplug or
6076 * module unload event has occurred. Detach all ports. Resource
6077 * release is handled via devres.
1da177e4 6078 *
6079 * LOCKING:
6080 * Inherited from PCI layer (may sleep).
6081 */
f0d36efd 6082void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 6083 {
6084 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6085 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6086
b878ca5d 6087 ata_host_detach(host);
1da177e4 6088 }
6089
6090/* move to PCI subsystem */
057ace5e 6091int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4 6092 {
6093 unsigned long tmp = 0;
6094
6095 switch (bits->width) {
6096 case 1: {
6097 u8 tmp8 = 0;
6098 pci_read_config_byte(pdev, bits->reg, &tmp8);
6099 tmp = tmp8;
6100 break;
6101 }
6102 case 2: {
6103 u16 tmp16 = 0;
6104 pci_read_config_word(pdev, bits->reg, &tmp16);
6105 tmp = tmp16;
6106 break;
6107 }
6108 case 4: {
6109 u32 tmp32 = 0;
6110 pci_read_config_dword(pdev, bits->reg, &tmp32);
6111 tmp = tmp32;
6112 break;
6113 }
6114
6115 default:
6116 return -EINVAL;
6117 }
6118
6119 tmp &= bits->mask;
6120
6121 return (tmp == bits->val) ? 1 : 0;
6122}
9b847548 6123
6ffa01d8 6124 #ifdef CONFIG_PM
3c5100c1 6125void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548 6126 {
6127 pci_save_state(pdev);
4c90d971 6128 pci_disable_device(pdev);
500530f6 6129
4c90d971 6130 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6131 pci_set_power_state(pdev, PCI_D3hot);
9b847548 6132 }
6133
553c4aa6 6134int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6135{
553c4aa6 6136 int rc;
6137
9b847548 6138 pci_set_power_state(pdev, PCI_D0);
6139 pci_restore_state(pdev);
553c4aa6 6140
b878ca5d 6141 rc = pcim_enable_device(pdev);
553c4aa6 6142 if (rc) {
6143 dev_printk(KERN_ERR, &pdev->dev,
6144 "failed to enable device after resume (%d)\n", rc);
6145 return rc;
6146 }
6147
9b847548 6148 pci_set_master(pdev);
553c4aa6 6149 return 0;
500530f6 6150 }
6151
3c5100c1 6152int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6153{
cca3974e 6154 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6 6155 int rc = 0;
6156
cca3974e 6157 rc = ata_host_suspend(host, mesg);
500530f6 6158 if (rc)
6159 return rc;
6160
3c5100c1 6161 ata_pci_device_do_suspend(pdev, mesg);
500530f6 6162
6163 return 0;
6164}
6165
6166int ata_pci_device_resume(struct pci_dev *pdev)
6167{
cca3974e 6168 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6169 int rc;
500530f6 6170
553c4aa6 6171 rc = ata_pci_device_do_resume(pdev);
6172 if (rc == 0)
6173 ata_host_resume(host);
6174 return rc;
9b847548 6175}
6ffa01d8 6176 #endif /* CONFIG_PM */
6177
1da177e4 6178 #endif /* CONFIG_PCI */
6179
6180
1da177e4 6181 static int __init ata_init(void)
6182{
a8601e5f 6183 ata_probe_timeout *= HZ;
1da177e4 6184 ata_wq = create_workqueue("ata");
6185 if (!ata_wq)
6186 return -ENOMEM;
6187
453b07ac 6188 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6189 if (!ata_aux_wq) {
6190 destroy_workqueue(ata_wq);
6191 return -ENOMEM;
6192 }
6193
1da177e4 6194 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6195 return 0;
6196}
6197
6198static void __exit ata_exit(void)
6199{
6200 destroy_workqueue(ata_wq);
453b07ac 6201 destroy_workqueue(ata_aux_wq);
1da177e4 6202 }
6203
a4625085 6204subsys_initcall(ata_init);
1da177e4 6205 module_exit(ata_exit);
6206
67846b30 6207static unsigned long ratelimit_time;
34af946a 6208static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30 6209
6210int ata_ratelimit(void)
6211{
6212 int rc;
6213 unsigned long flags;
6214
6215 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6216
6217 if (time_after(jiffies, ratelimit_time)) {
6218 rc = 1;
6219 ratelimit_time = jiffies + (HZ/5);
6220 } else
6221 rc = 0;
6222
6223 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6224
6225 return rc;
6226}
6227
c22daff4 6228 /**
6229 * ata_wait_register - wait until register value changes
6230 * @reg: IO-mapped register
6231 * @mask: Mask to apply to read register value
6232 * @val: Wait condition
6233 * @interval_msec: polling interval in milliseconds
6234 * @timeout_msec: timeout in milliseconds
6235 *
6236 * Waiting for some bits of register to change is a common
6237 * operation for ATA controllers. This function reads 32bit LE
6238 * IO-mapped register @reg and tests for the following condition.
6239 *
6240 * (*@reg & mask) != val
6241 *
6242 * If the condition is met, it returns; otherwise, the process is
6243 * repeated after @interval_msec until timeout.
6244 *
6245 * LOCKING:
6246 * Kernel thread context (may sleep)
6247 *
6248 * RETURNS:
6249 * The final register value.
6250 */
6251u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6252 unsigned long interval_msec,
6253 unsigned long timeout_msec)
6254{
6255 unsigned long timeout;
6256 u32 tmp;
6257
6258 tmp = ioread32(reg);
6259
6260 /* Calculate timeout _after_ the first read to make sure
6261 * preceding writes reach the controller before starting to
6262 * eat away the timeout.
6263 */
6264 timeout = jiffies + (timeout_msec * HZ) / 1000;
6265
6266 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6267 msleep(interval_msec);
6268 tmp = ioread32(reg);
6269 }
6270
6271 return tmp;
6272}
6273
dd5b06c4 6274 /*
6275 * Dummy port_ops
6276 */
6277static void ata_dummy_noret(struct ata_port *ap) { }
6278static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6279static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6280
6281static u8 ata_dummy_check_status(struct ata_port *ap)
6282{
6283 return ATA_DRDY;
6284}
6285
6286static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6287{
6288 return AC_ERR_SYSTEM;
6289}
6290
6291const struct ata_port_operations ata_dummy_port_ops = {
6292 .port_disable = ata_port_disable,
6293 .check_status = ata_dummy_check_status,
6294 .check_altstatus = ata_dummy_check_status,
6295 .dev_select = ata_noop_dev_select,
6296 .qc_prep = ata_noop_qc_prep,
6297 .qc_issue = ata_dummy_qc_issue,
6298 .freeze = ata_dummy_noret,
6299 .thaw = ata_dummy_noret,
6300 .error_handler = ata_dummy_noret,
6301 .post_internal_cmd = ata_dummy_qc_noret,
6302 .irq_clear = ata_dummy_noret,
6303 .port_start = ata_dummy_ret0,
6304 .port_stop = ata_dummy_noret,
6305};
6306
1da177e4 6307 /*
6308 * libata is essentially a library of internal helper functions for
6309 * low-level ATA host controller drivers. As such, the API/ABI is
6310 * likely to change as new drivers are added and updated.
6311 * Do not depend on ABI/API stability.
6312 */
6313
e9c83914 6314 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6315 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6316 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6317 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4 6318 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6319 EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6320 EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6321 EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6322 EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6323 EXPORT_SYMBOL_GPL(ata_sg_init);
6324 EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6325 EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6326 EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6327 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6328 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4 6329 EXPORT_SYMBOL_GPL(ata_tf_load);
6330 EXPORT_SYMBOL_GPL(ata_tf_read);
6331 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6332 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6333 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6334 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6335 EXPORT_SYMBOL_GPL(ata_check_status);
6336 EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4 6337 EXPORT_SYMBOL_GPL(ata_exec_command);
6338 EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6339 EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566 6340 EXPORT_SYMBOL_GPL(ata_data_xfer);
6341 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6342 EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6343 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 6344 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6345 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6346 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6347 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6348 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7 6349 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6350 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6351 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6352 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6353 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6354 EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6355 EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6356 EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7 6357 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6358 EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4 6359 EXPORT_SYMBOL_GPL(sata_phy_reset);
6360 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6361 EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6362 EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6363 EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6364 EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804 6365 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6366 EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8 6367 EXPORT_SYMBOL_GPL(ata_dev_classify);
6368 EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6369 EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6370 EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6371 EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6372 EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6373 EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4 6374 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6375 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6376 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6377 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6378 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6379 EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170 6380 EXPORT_SYMBOL_GPL(sata_scr_valid);
6381 EXPORT_SYMBOL_GPL(sata_scr_read);
6382 EXPORT_SYMBOL_GPL(sata_scr_write);
6383 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6384 EXPORT_SYMBOL_GPL(ata_port_online);
6385 EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6386 #ifdef CONFIG_PM
cca3974e 6387 EXPORT_SYMBOL_GPL(ata_host_suspend);
6388 EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6389 #endif /* CONFIG_PM */
6a62a04d 6390 EXPORT_SYMBOL_GPL(ata_id_string);
6391 EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6392 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6393 EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4 6394 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6395
1bc4ccff 6396 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9 6397 EXPORT_SYMBOL_GPL(ata_timing_compute);
6398 EXPORT_SYMBOL_GPL(ata_timing_merge);
6399
1da177e4 6400 #ifdef CONFIG_PCI
6401 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6402 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6403 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6404 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6405 #ifdef CONFIG_PM
500530f6 6406 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6407 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548 6408 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6409 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6410 #endif /* CONFIG_PM */
67951ade 6411 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6412 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6413 #endif /* CONFIG_PCI */
9b847548 6414
6ffa01d8 6415 #ifdef CONFIG_PM
9b847548 6416 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6417 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6ffa01d8 6418 #endif /* CONFIG_PM */
ece1d636 6419
ece1d636 6420 EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 6421 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6422 EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6423 EXPORT_SYMBOL_GPL(ata_port_freeze);
6424 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6425 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636 6426 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6427 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6428 EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 6429 EXPORT_SYMBOL_GPL(ata_irq_on);
6430 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6431 EXPORT_SYMBOL_GPL(ata_irq_ack);
6432 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6433 EXPORT_SYMBOL_GPL(ata_dev_try_classify);