ahci: reimplement port_map handling
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
8bc3fc47 62#define DRV_VERSION "2.21" /* must be exactly four chars */
fda0efc5
JG
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 73static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 74static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 75static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 76
f3187195 77unsigned int ata_print_id = 1;
1da177e4
LT
78static struct workqueue_struct *ata_wq;
79
453b07ac
TH
80struct workqueue_struct *ata_aux_wq;
81
418dc1f5 82int atapi_enabled = 1;
1623c81e
JG
83module_param(atapi_enabled, int, 0444);
84MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
85
95de719a
AL
86int atapi_dmadir = 0;
87module_param(atapi_dmadir, int, 0444);
88MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
89
baf4fdfa
ML
90int atapi_passthru16 = 1;
91module_param(atapi_passthru16, int, 0444);
92MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
93
c3c013a2
JG
94int libata_fua = 0;
95module_param_named(fua, libata_fua, int, 0444);
96MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
97
1e999736
AC
98static int ata_ignore_hpa = 0;
99module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
100MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
101
a8601e5f
AM
102static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
103module_param(ata_probe_timeout, int, 0444);
104MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
105
d7d0dad6
JG
106int libata_noacpi = 1;
107module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
108MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
109
1da177e4
LT
110MODULE_AUTHOR("Jeff Garzik");
111MODULE_DESCRIPTION("Library module for ATA devices");
112MODULE_LICENSE("GPL");
113MODULE_VERSION(DRV_VERSION);
114
0baab86b 115
1da177e4
LT
116/**
117 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
118 * @tf: Taskfile to convert
1da177e4 119 * @pmp: Port multiplier port
9977126c
TH
120 * @is_cmd: This FIS is for command
121 * @fis: Buffer into which data will output
1da177e4
LT
122 *
123 * Converts a standard ATA taskfile to a Serial ATA
124 * FIS structure (Register - Host to Device).
125 *
126 * LOCKING:
127 * Inherited from caller.
128 */
9977126c 129void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 130{
9977126c
TH
131 fis[0] = 0x27; /* Register - Host to Device FIS */
132 fis[1] = pmp & 0xf; /* Port multiplier number*/
133 if (is_cmd)
134 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
135
1da177e4
LT
136 fis[2] = tf->command;
137 fis[3] = tf->feature;
138
139 fis[4] = tf->lbal;
140 fis[5] = tf->lbam;
141 fis[6] = tf->lbah;
142 fis[7] = tf->device;
143
144 fis[8] = tf->hob_lbal;
145 fis[9] = tf->hob_lbam;
146 fis[10] = tf->hob_lbah;
147 fis[11] = tf->hob_feature;
148
149 fis[12] = tf->nsect;
150 fis[13] = tf->hob_nsect;
151 fis[14] = 0;
152 fis[15] = tf->ctl;
153
154 fis[16] = 0;
155 fis[17] = 0;
156 fis[18] = 0;
157 fis[19] = 0;
158}
159
160/**
161 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
162 * @fis: Buffer from which data will be input
163 * @tf: Taskfile to output
164 *
e12a1be6 165 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
166 *
167 * LOCKING:
168 * Inherited from caller.
169 */
170
057ace5e 171void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
172{
173 tf->command = fis[2]; /* status */
174 tf->feature = fis[3]; /* error */
175
176 tf->lbal = fis[4];
177 tf->lbam = fis[5];
178 tf->lbah = fis[6];
179 tf->device = fis[7];
180
181 tf->hob_lbal = fis[8];
182 tf->hob_lbam = fis[9];
183 tf->hob_lbah = fis[10];
184
185 tf->nsect = fis[12];
186 tf->hob_nsect = fis[13];
187}
188
8cbd6df1
AL
189static const u8 ata_rw_cmds[] = {
190 /* pio multi */
191 ATA_CMD_READ_MULTI,
192 ATA_CMD_WRITE_MULTI,
193 ATA_CMD_READ_MULTI_EXT,
194 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
195 0,
196 0,
197 0,
198 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
199 /* pio */
200 ATA_CMD_PIO_READ,
201 ATA_CMD_PIO_WRITE,
202 ATA_CMD_PIO_READ_EXT,
203 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
204 0,
205 0,
206 0,
207 0,
8cbd6df1
AL
208 /* dma */
209 ATA_CMD_READ,
210 ATA_CMD_WRITE,
211 ATA_CMD_READ_EXT,
9a3dccc4
TH
212 ATA_CMD_WRITE_EXT,
213 0,
214 0,
215 0,
216 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 217};
1da177e4
LT
218
219/**
8cbd6df1 220 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
221 * @tf: command to examine and configure
222 * @dev: device tf belongs to
1da177e4 223 *
2e9edbf8 224 * Examine the device configuration and tf->flags to calculate
8cbd6df1 225 * the proper read/write commands and protocol to use.
1da177e4
LT
226 *
227 * LOCKING:
228 * caller.
229 */
bd056d7e 230static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 231{
9a3dccc4 232 u8 cmd;
1da177e4 233
9a3dccc4 234 int index, fua, lba48, write;
2e9edbf8 235
9a3dccc4 236 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
237 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
238 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 239
8cbd6df1
AL
240 if (dev->flags & ATA_DFLAG_PIO) {
241 tf->protocol = ATA_PROT_PIO;
9a3dccc4 242 index = dev->multi_count ? 0 : 8;
9af5c9c9 243 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
244 /* Unable to use DMA due to host limitation */
245 tf->protocol = ATA_PROT_PIO;
0565c26d 246 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
247 } else {
248 tf->protocol = ATA_PROT_DMA;
9a3dccc4 249 index = 16;
8cbd6df1 250 }
1da177e4 251
9a3dccc4
TH
252 cmd = ata_rw_cmds[index + fua + lba48 + write];
253 if (cmd) {
254 tf->command = cmd;
255 return 0;
256 }
257 return -1;
1da177e4
LT
258}
259
35b649fe
TH
260/**
261 * ata_tf_read_block - Read block address from ATA taskfile
262 * @tf: ATA taskfile of interest
263 * @dev: ATA device @tf belongs to
264 *
265 * LOCKING:
266 * None.
267 *
268 * Read block address from @tf. This function can handle all
269 * three address formats - LBA, LBA48 and CHS. tf->protocol and
270 * flags select the address format to use.
271 *
272 * RETURNS:
273 * Block address read from @tf.
274 */
275u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
276{
277 u64 block = 0;
278
279 if (tf->flags & ATA_TFLAG_LBA) {
280 if (tf->flags & ATA_TFLAG_LBA48) {
281 block |= (u64)tf->hob_lbah << 40;
282 block |= (u64)tf->hob_lbam << 32;
283 block |= tf->hob_lbal << 24;
284 } else
285 block |= (tf->device & 0xf) << 24;
286
287 block |= tf->lbah << 16;
288 block |= tf->lbam << 8;
289 block |= tf->lbal;
290 } else {
291 u32 cyl, head, sect;
292
293 cyl = tf->lbam | (tf->lbah << 8);
294 head = tf->device & 0xf;
295 sect = tf->lbal;
296
297 block = (cyl * dev->heads + head) * dev->sectors + sect;
298 }
299
300 return block;
301}
302
bd056d7e
TH
303/**
304 * ata_build_rw_tf - Build ATA taskfile for given read/write request
305 * @tf: Target ATA taskfile
306 * @dev: ATA device @tf belongs to
307 * @block: Block address
308 * @n_block: Number of blocks
309 * @tf_flags: RW/FUA etc...
310 * @tag: tag
311 *
312 * LOCKING:
313 * None.
314 *
315 * Build ATA taskfile @tf for read/write request described by
316 * @block, @n_block, @tf_flags and @tag on @dev.
317 *
318 * RETURNS:
319 *
320 * 0 on success, -ERANGE if the request is too large for @dev,
321 * -EINVAL if the request is invalid.
322 */
323int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
324 u64 block, u32 n_block, unsigned int tf_flags,
325 unsigned int tag)
326{
327 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
328 tf->flags |= tf_flags;
329
6d1245bf 330 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
331 /* yay, NCQ */
332 if (!lba_48_ok(block, n_block))
333 return -ERANGE;
334
335 tf->protocol = ATA_PROT_NCQ;
336 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
337
338 if (tf->flags & ATA_TFLAG_WRITE)
339 tf->command = ATA_CMD_FPDMA_WRITE;
340 else
341 tf->command = ATA_CMD_FPDMA_READ;
342
343 tf->nsect = tag << 3;
344 tf->hob_feature = (n_block >> 8) & 0xff;
345 tf->feature = n_block & 0xff;
346
347 tf->hob_lbah = (block >> 40) & 0xff;
348 tf->hob_lbam = (block >> 32) & 0xff;
349 tf->hob_lbal = (block >> 24) & 0xff;
350 tf->lbah = (block >> 16) & 0xff;
351 tf->lbam = (block >> 8) & 0xff;
352 tf->lbal = block & 0xff;
353
354 tf->device = 1 << 6;
355 if (tf->flags & ATA_TFLAG_FUA)
356 tf->device |= 1 << 7;
357 } else if (dev->flags & ATA_DFLAG_LBA) {
358 tf->flags |= ATA_TFLAG_LBA;
359
360 if (lba_28_ok(block, n_block)) {
361 /* use LBA28 */
362 tf->device |= (block >> 24) & 0xf;
363 } else if (lba_48_ok(block, n_block)) {
364 if (!(dev->flags & ATA_DFLAG_LBA48))
365 return -ERANGE;
366
367 /* use LBA48 */
368 tf->flags |= ATA_TFLAG_LBA48;
369
370 tf->hob_nsect = (n_block >> 8) & 0xff;
371
372 tf->hob_lbah = (block >> 40) & 0xff;
373 tf->hob_lbam = (block >> 32) & 0xff;
374 tf->hob_lbal = (block >> 24) & 0xff;
375 } else
376 /* request too large even for LBA48 */
377 return -ERANGE;
378
379 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
380 return -EINVAL;
381
382 tf->nsect = n_block & 0xff;
383
384 tf->lbah = (block >> 16) & 0xff;
385 tf->lbam = (block >> 8) & 0xff;
386 tf->lbal = block & 0xff;
387
388 tf->device |= ATA_LBA;
389 } else {
390 /* CHS */
391 u32 sect, head, cyl, track;
392
393 /* The request -may- be too large for CHS addressing. */
394 if (!lba_28_ok(block, n_block))
395 return -ERANGE;
396
397 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
398 return -EINVAL;
399
400 /* Convert LBA to CHS */
401 track = (u32)block / dev->sectors;
402 cyl = track / dev->heads;
403 head = track % dev->heads;
404 sect = (u32)block % dev->sectors + 1;
405
406 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
407 (u32)block, track, cyl, head, sect);
408
409 /* Check whether the converted CHS can fit.
410 Cylinder: 0-65535
411 Head: 0-15
412 Sector: 1-255*/
413 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
414 return -ERANGE;
415
416 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
417 tf->lbal = sect;
418 tf->lbam = cyl;
419 tf->lbah = cyl >> 8;
420 tf->device |= head;
421 }
422
423 return 0;
424}
425
cb95d562
TH
426/**
427 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
428 * @pio_mask: pio_mask
429 * @mwdma_mask: mwdma_mask
430 * @udma_mask: udma_mask
431 *
432 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
433 * unsigned int xfer_mask.
434 *
435 * LOCKING:
436 * None.
437 *
438 * RETURNS:
439 * Packed xfer_mask.
440 */
441static unsigned int ata_pack_xfermask(unsigned int pio_mask,
442 unsigned int mwdma_mask,
443 unsigned int udma_mask)
444{
445 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
446 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
447 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
448}
449
c0489e4e
TH
450/**
451 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
452 * @xfer_mask: xfer_mask to unpack
453 * @pio_mask: resulting pio_mask
454 * @mwdma_mask: resulting mwdma_mask
455 * @udma_mask: resulting udma_mask
456 *
457 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
458 * Any NULL distination masks will be ignored.
459 */
460static void ata_unpack_xfermask(unsigned int xfer_mask,
461 unsigned int *pio_mask,
462 unsigned int *mwdma_mask,
463 unsigned int *udma_mask)
464{
465 if (pio_mask)
466 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
467 if (mwdma_mask)
468 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
469 if (udma_mask)
470 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
471}
472
cb95d562 473static const struct ata_xfer_ent {
be9a50c8 474 int shift, bits;
cb95d562
TH
475 u8 base;
476} ata_xfer_tbl[] = {
477 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
478 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
479 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
480 { -1, },
481};
482
483/**
484 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
485 * @xfer_mask: xfer_mask of interest
486 *
487 * Return matching XFER_* value for @xfer_mask. Only the highest
488 * bit of @xfer_mask is considered.
489 *
490 * LOCKING:
491 * None.
492 *
493 * RETURNS:
494 * Matching XFER_* value, 0 if no match found.
495 */
496static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
497{
498 int highbit = fls(xfer_mask) - 1;
499 const struct ata_xfer_ent *ent;
500
501 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
502 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
503 return ent->base + highbit - ent->shift;
504 return 0;
505}
506
507/**
508 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
509 * @xfer_mode: XFER_* of interest
510 *
511 * Return matching xfer_mask for @xfer_mode.
512 *
513 * LOCKING:
514 * None.
515 *
516 * RETURNS:
517 * Matching xfer_mask, 0 if no match found.
518 */
519static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
520{
521 const struct ata_xfer_ent *ent;
522
523 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
524 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
525 return 1 << (ent->shift + xfer_mode - ent->base);
526 return 0;
527}
528
529/**
530 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
531 * @xfer_mode: XFER_* of interest
532 *
533 * Return matching xfer_shift for @xfer_mode.
534 *
535 * LOCKING:
536 * None.
537 *
538 * RETURNS:
539 * Matching xfer_shift, -1 if no match found.
540 */
541static int ata_xfer_mode2shift(unsigned int xfer_mode)
542{
543 const struct ata_xfer_ent *ent;
544
545 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
546 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
547 return ent->shift;
548 return -1;
549}
550
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
596
/* Map a SATA link speed number (1-based) to a human-readable string;
 * returns "<unknown>" for zero or out-of-range values. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= sizeof(spd_str) / sizeof(spd_str[0]))
		return "<unknown>";
	return spd_str[spd - 1];
}
608
3373efd8 609void ata_dev_disable(struct ata_device *dev)
0b8efb0a 610{
09d7f9b0 611 if (ata_dev_enabled(dev)) {
9af5c9c9 612 if (ata_msg_drv(dev->link->ap))
09d7f9b0 613 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
614 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
615 ATA_DNXFER_QUIET);
0b8efb0a
TH
616 dev->class++;
617 }
618}
619
1da177e4 620/**
0d5ff566 621 * ata_devchk - PATA device presence detection
1da177e4
LT
622 * @ap: ATA channel to examine
623 * @device: Device to examine (starting at zero)
624 *
625 * This technique was originally described in
626 * Hale Landis's ATADRVR (www.ata-atapi.com), and
627 * later found its way into the ATA/ATAPI spec.
628 *
629 * Write a pattern to the ATA shadow registers,
630 * and if a device is present, it will respond by
631 * correctly storing and echoing back the
632 * ATA shadow register contents.
633 *
634 * LOCKING:
635 * caller.
636 */
637
0d5ff566 638static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
639{
640 struct ata_ioports *ioaddr = &ap->ioaddr;
641 u8 nsect, lbal;
642
643 ap->ops->dev_select(ap, device);
644
0d5ff566
TH
645 iowrite8(0x55, ioaddr->nsect_addr);
646 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 647
0d5ff566
TH
648 iowrite8(0xaa, ioaddr->nsect_addr);
649 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 650
0d5ff566
TH
651 iowrite8(0x55, ioaddr->nsect_addr);
652 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 653
0d5ff566
TH
654 nsect = ioread8(ioaddr->nsect_addr);
655 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
656
657 if ((nsect == 0x55) && (lbal == 0xaa))
658 return 1; /* we found a device */
659
660 return 0; /* nothing found */
661}
662
1da177e4
LT
663/**
664 * ata_dev_classify - determine device type based on ATA-spec signature
665 * @tf: ATA taskfile register set for device to be identified
666 *
667 * Determine from taskfile register contents whether a device is
668 * ATA or ATAPI, as per "Signature and persistence" section
669 * of ATA/PI spec (volume 1, sect 5.14).
670 *
671 * LOCKING:
672 * None.
673 *
674 * RETURNS:
675 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
676 * the event of failure.
677 */
678
057ace5e 679unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
680{
681 /* Apple's open source Darwin code hints that some devices only
682 * put a proper signature into the LBA mid/high registers,
683 * So, we only check those. It's sufficient for uniqueness.
684 */
685
686 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
687 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
688 DPRINTK("found ATA device by sig\n");
689 return ATA_DEV_ATA;
690 }
691
692 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
693 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
694 DPRINTK("found ATAPI device by sig\n");
695 return ATA_DEV_ATAPI;
696 }
697
698 DPRINTK("unknown device\n");
699 return ATA_DEV_UNKNOWN;
700}
701
702/**
703 * ata_dev_try_classify - Parse returned ATA device signature
704 * @ap: ATA channel to examine
705 * @device: Device to examine (starting at zero)
b4dc7623 706 * @r_err: Value of error register on completion
1da177e4
LT
707 *
708 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
709 * an ATA/ATAPI-defined set of values is placed in the ATA
710 * shadow registers, indicating the results of device detection
711 * and diagnostics.
712 *
713 * Select the ATA device, and read the values from the ATA shadow
714 * registers. Then parse according to the Error register value,
715 * and the spec-defined values examined by ata_dev_classify().
716 *
717 * LOCKING:
718 * caller.
b4dc7623
TH
719 *
720 * RETURNS:
721 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
722 */
723
a619f981 724unsigned int
b4dc7623 725ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 726{
1da177e4
LT
727 struct ata_taskfile tf;
728 unsigned int class;
729 u8 err;
730
731 ap->ops->dev_select(ap, device);
732
733 memset(&tf, 0, sizeof(tf));
734
1da177e4 735 ap->ops->tf_read(ap, &tf);
0169e284 736 err = tf.feature;
b4dc7623
TH
737 if (r_err)
738 *r_err = err;
1da177e4 739
93590859
AC
740 /* see if device passed diags: if master then continue and warn later */
741 if (err == 0 && device == 0)
742 /* diagnostic fail : do nothing _YET_ */
9af5c9c9 743 ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 744 else if (err == 1)
1da177e4
LT
745 /* do nothing */ ;
746 else if ((device == 0) && (err == 0x81))
747 /* do nothing */ ;
748 else
b4dc7623 749 return ATA_DEV_NONE;
1da177e4 750
b4dc7623 751 /* determine if device is ATA or ATAPI */
1da177e4 752 class = ata_dev_classify(&tf);
b4dc7623 753
1da177e4 754 if (class == ATA_DEV_UNKNOWN)
b4dc7623 755 return ATA_DEV_NONE;
1da177e4 756 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
757 return ATA_DEV_NONE;
758 return class;
1da177e4
LT
759}
760
761/**
6a62a04d 762 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
763 * @id: IDENTIFY DEVICE results we will examine
764 * @s: string into which data is output
765 * @ofs: offset into identify device page
766 * @len: length of string to return. must be an even number.
767 *
768 * The strings in the IDENTIFY DEVICE page are broken up into
769 * 16-bit chunks. Run through the string, and output each
770 * 8-bit chunk linearly, regardless of platform.
771 *
772 * LOCKING:
773 * caller.
774 */
775
6a62a04d
TH
776void ata_id_string(const u16 *id, unsigned char *s,
777 unsigned int ofs, unsigned int len)
1da177e4
LT
778{
779 unsigned int c;
780
781 while (len > 0) {
782 c = id[ofs] >> 8;
783 *s = c;
784 s++;
785
786 c = id[ofs] & 0xff;
787 *s = c;
788 s++;
789
790 ofs++;
791 len -= 2;
792 }
793}
794
0e949ff3 795/**
6a62a04d 796 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
797 * @id: IDENTIFY DEVICE results we will examine
798 * @s: string into which data is output
799 * @ofs: offset into identify device page
800 * @len: length of string to return. must be an odd number.
801 *
6a62a04d 802 * This function is identical to ata_id_string except that it
0e949ff3
TH
803 * trims trailing spaces and terminates the resulting string with
804 * null. @len must be actual maximum length (even number) + 1.
805 *
806 * LOCKING:
807 * caller.
808 */
6a62a04d
TH
809void ata_id_c_string(const u16 *id, unsigned char *s,
810 unsigned int ofs, unsigned int len)
0e949ff3
TH
811{
812 unsigned char *p;
813
814 WARN_ON(!(len & 1));
815
6a62a04d 816 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
817
818 p = s + strnlen(s, len - 1);
819 while (p > s && p[-1] == ' ')
820 p--;
821 *p = '\0';
822}
0baab86b 823
db6f8759
TH
824static u64 ata_id_n_sectors(const u16 *id)
825{
826 if (ata_id_has_lba(id)) {
827 if (ata_id_has_lba48(id))
828 return ata_id_u64(id, 100);
829 else
830 return ata_id_u32(id, 60);
831 } else {
832 if (ata_id_current_chs_valid(id))
833 return ata_id_u32(id, 57);
834 else
835 return id[1] * id[3] * id[6];
836 }
837}
838
1e999736
AC
839static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
840{
841 u64 sectors = 0;
842
843 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
844 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
845 sectors |= (tf->hob_lbal & 0xff) << 24;
846 sectors |= (tf->lbah & 0xff) << 16;
847 sectors |= (tf->lbam & 0xff) << 8;
848 sectors |= (tf->lbal & 0xff);
849
850 return ++sectors;
851}
852
853static u64 ata_tf_to_lba(struct ata_taskfile *tf)
854{
855 u64 sectors = 0;
856
857 sectors |= (tf->device & 0x0f) << 24;
858 sectors |= (tf->lbah & 0xff) << 16;
859 sectors |= (tf->lbam & 0xff) << 8;
860 sectors |= (tf->lbal & 0xff);
861
862 return ++sectors;
863}
864
865/**
c728a914
TH
866 * ata_read_native_max_address - Read native max address
867 * @dev: target device
868 * @max_sectors: out parameter for the result native max address
1e999736 869 *
c728a914
TH
870 * Perform an LBA48 or LBA28 native size query upon the device in
871 * question.
1e999736 872 *
c728a914
TH
873 * RETURNS:
874 * 0 on success, -EACCES if command is aborted by the drive.
875 * -EIO on other errors.
1e999736 876 */
c728a914 877static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 878{
c728a914 879 unsigned int err_mask;
1e999736 880 struct ata_taskfile tf;
c728a914 881 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
882
883 ata_tf_init(dev, &tf);
884
c728a914 885 /* always clear all address registers */
1e999736 886 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 887
c728a914
TH
888 if (lba48) {
889 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
890 tf.flags |= ATA_TFLAG_LBA48;
891 } else
892 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 893
1e999736 894 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
895 tf.device |= ATA_LBA;
896
897 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
898 if (err_mask) {
899 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
900 "max address (err_mask=0x%x)\n", err_mask);
901 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
902 return -EACCES;
903 return -EIO;
904 }
1e999736 905
c728a914
TH
906 if (lba48)
907 *max_sectors = ata_tf_to_lba48(&tf);
908 else
909 *max_sectors = ata_tf_to_lba(&tf);
1e999736 910
c728a914 911 return 0;
1e999736
AC
912}
913
914/**
c728a914
TH
915 * ata_set_max_sectors - Set max sectors
916 * @dev: target device
6b38d1d1 917 * @new_sectors: new max sectors value to set for the device
1e999736 918 *
c728a914
TH
919 * Set max sectors of @dev to @new_sectors.
920 *
921 * RETURNS:
922 * 0 on success, -EACCES if command is aborted or denied (due to
923 * previous non-volatile SET_MAX) by the drive. -EIO on other
924 * errors.
1e999736 925 */
05027adc 926static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 927{
c728a914 928 unsigned int err_mask;
1e999736 929 struct ata_taskfile tf;
c728a914 930 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
931
932 new_sectors--;
933
934 ata_tf_init(dev, &tf);
935
1e999736 936 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
937
938 if (lba48) {
939 tf.command = ATA_CMD_SET_MAX_EXT;
940 tf.flags |= ATA_TFLAG_LBA48;
941
942 tf.hob_lbal = (new_sectors >> 24) & 0xff;
943 tf.hob_lbam = (new_sectors >> 32) & 0xff;
944 tf.hob_lbah = (new_sectors >> 40) & 0xff;
945 } else
946 tf.command = ATA_CMD_SET_MAX;
947
1e999736 948 tf.protocol |= ATA_PROT_NODATA;
c728a914 949 tf.device |= ATA_LBA;
1e999736
AC
950
951 tf.lbal = (new_sectors >> 0) & 0xff;
952 tf.lbam = (new_sectors >> 8) & 0xff;
953 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 954
c728a914
TH
955 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
956 if (err_mask) {
957 ata_dev_printk(dev, KERN_WARNING, "failed to set "
958 "max address (err_mask=0x%x)\n", err_mask);
959 if (err_mask == AC_ERR_DEV &&
960 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
961 return -EACCES;
962 return -EIO;
963 }
964
c728a914 965 return 0;
1e999736
AC
966}
967
968/**
969 * ata_hpa_resize - Resize a device with an HPA set
970 * @dev: Device to resize
971 *
972 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
973 * it if required to the full size of the media. The caller must check
974 * the drive has the HPA feature set enabled.
05027adc
TH
975 *
976 * RETURNS:
977 * 0 on success, -errno on failure.
1e999736 978 */
05027adc 979static int ata_hpa_resize(struct ata_device *dev)
1e999736 980{
05027adc
TH
981 struct ata_eh_context *ehc = &dev->link->eh_context;
982 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
983 u64 sectors = ata_id_n_sectors(dev->id);
984 u64 native_sectors;
c728a914 985 int rc;
a617c09f 986
05027adc
TH
987 /* do we need to do it? */
988 if (dev->class != ATA_DEV_ATA ||
989 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
990 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 991 return 0;
1e999736 992
05027adc
TH
993 /* read native max address */
994 rc = ata_read_native_max_address(dev, &native_sectors);
995 if (rc) {
996 /* If HPA isn't going to be unlocked, skip HPA
997 * resizing from the next try.
998 */
999 if (!ata_ignore_hpa) {
1000 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1001 "broken, will skip HPA handling\n");
1002 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1003
1004 /* we can continue if device aborted the command */
1005 if (rc == -EACCES)
1006 rc = 0;
1e999736 1007 }
37301a55 1008
05027adc
TH
1009 return rc;
1010 }
1011
1012 /* nothing to do? */
1013 if (native_sectors <= sectors || !ata_ignore_hpa) {
1014 if (!print_info || native_sectors == sectors)
1015 return 0;
1016
1017 if (native_sectors > sectors)
1018 ata_dev_printk(dev, KERN_INFO,
1019 "HPA detected: current %llu, native %llu\n",
1020 (unsigned long long)sectors,
1021 (unsigned long long)native_sectors);
1022 else if (native_sectors < sectors)
1023 ata_dev_printk(dev, KERN_WARNING,
1024 "native sectors (%llu) is smaller than "
1025 "sectors (%llu)\n",
1026 (unsigned long long)native_sectors,
1027 (unsigned long long)sectors);
1028 return 0;
1029 }
1030
1031 /* let's unlock HPA */
1032 rc = ata_set_max_sectors(dev, native_sectors);
1033 if (rc == -EACCES) {
1034 /* if device aborted the command, skip HPA resizing */
1035 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1036 "(%llu -> %llu), skipping HPA handling\n",
1037 (unsigned long long)sectors,
1038 (unsigned long long)native_sectors);
1039 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1040 return 0;
1041 } else if (rc)
1042 return rc;
1043
1044 /* re-read IDENTIFY data */
1045 rc = ata_dev_reread_id(dev, 0);
1046 if (rc) {
1047 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1048 "data after HPA resizing\n");
1049 return rc;
1050 }
1051
1052 if (print_info) {
1053 u64 new_sectors = ata_id_n_sectors(dev->id);
1054 ata_dev_printk(dev, KERN_INFO,
1055 "HPA unlocked: %llu -> %llu, native %llu\n",
1056 (unsigned long long)sectors,
1057 (unsigned long long)new_sectors,
1058 (unsigned long long)native_sectors);
1059 }
1060
1061 return 0;
1e999736
AC
1062}
1063
10305f0f
A
1064/**
1065 * ata_id_to_dma_mode - Identify DMA mode from id block
1066 * @dev: device to identify
cc261267 1067 * @unknown: mode to assume if we cannot tell
10305f0f
A
1068 *
1069 * Set up the timing values for the device based upon the identify
1070 * reported values for the DMA mode. This function is used by drivers
1071 * which rely upon firmware configured modes, but wish to report the
1072 * mode correctly when possible.
1073 *
1074 * In addition we emit similarly formatted messages to the default
1075 * ata_dev_set_mode handler, in order to provide consistency of
1076 * presentation.
1077 */
1078
1079void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1080{
1081 unsigned int mask;
1082 u8 mode;
1083
1084 /* Pack the DMA modes */
1085 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1086 if (dev->id[53] & 0x04)
1087 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1088
1089 /* Select the mode in use */
1090 mode = ata_xfer_mask2mode(mask);
1091
1092 if (mode != 0) {
1093 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1094 ata_mode_string(mask));
1095 } else {
1096 /* SWDMA perhaps ? */
1097 mode = unknown;
1098 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1099 }
1100
1101 /* Configure the device reporting */
1102 dev->xfer_mode = mode;
1103 dev->xfer_shift = ata_xfer_mode2shift(mode);
1104}
1105
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.  It exists for
 *	controllers where device selection is a hardware no-op.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
1121
0baab86b 1122
1da177e4
LT
1123/**
1124 * ata_std_dev_select - Select device 0/1 on ATA bus
1125 * @ap: ATA channel to manipulate
1126 * @device: ATA device (numbered from zero) to select
1127 *
1128 * Use the method defined in the ATA specification to
1129 * make either device 0, or device 1, active on the
0baab86b
EF
1130 * ATA channel. Works with both PIO and MMIO.
1131 *
1132 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1133 *
1134 * LOCKING:
1135 * caller.
1136 */
1137
1138void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1139{
1140 u8 tmp;
1141
1142 if (device == 0)
1143 tmp = ATA_DEVICE_OBS;
1144 else
1145 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1146
0d5ff566 1147 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1148 ata_pause(ap); /* needed; also flushes, for mmio */
1149}
1150
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* wait for the channel to go idle before touching the select bit */
	if (wait)
		ata_wait_idle(ap);

	/* perform the actual selection via the LLD's method */
	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after selection;
		 * sleeping is only safe when the caller allows it.
		 */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
1188
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  Compiles away to nothing unless debugging printks
 *	(DPRINTK) are enabled.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* capability / PIO / DMA related words */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* version / command-set support words */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* UDMA and hardware-reset result words */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1227
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 bits 0-1 advertise PIO3/PIO4 on top of the
		 * always-assumed PIO0-2 (hence the |= 0x7 below).
		 */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	/* word 63 low bits: supported Multiword DMA modes */
	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CFA advanced modes map onto PIO5/PIO6 and MWDMA3/MWDMA4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid when word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1296
86e45b6b
TH
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* rebind the per-port work item to @fn and stash its argument
	 * before queueing -- port_task_data is read by the work function
	 */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1325
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending delayed work and waits for a running
	 * instance to finish, including any rearm it performs
	 */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1345
7102d230 1346static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1347{
77853bf2 1348 struct completion *waiting = qc->private_data;
a2a7a662 1349
a2a7a662 1350 complete(waiting);
a2a7a662
TH
1351}
1352
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free here; anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the link/port command state so the internal
	 * command runs alone; restored in the "finish up" section below
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total transfer length is the sum of all sg segments */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out -- rc == 0 means the completion never fired */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1525
2432697b 1526/**
33480a0e 1527 * ata_exec_internal - execute libata internal command
2432697b
TH
1528 * @dev: Device to which the command is sent
1529 * @tf: Taskfile registers for the command and the result
1530 * @cdb: CDB for packet command
1531 * @dma_dir: Data tranfer direction of the command
1532 * @buf: Data buffer of the command
1533 * @buflen: Length of data buffer
1534 *
1535 * Wrapper around ata_exec_internal_sg() which takes simple
1536 * buffer instead of sg list.
1537 *
1538 * LOCKING:
1539 * None. Should be called with kernel context, might sleep.
1540 *
1541 * RETURNS:
1542 * Zero on success, AC_ERR_* mask on failure
1543 */
1544unsigned ata_exec_internal(struct ata_device *dev,
1545 struct ata_taskfile *tf, const u8 *cdb,
1546 int dma_dir, void *buf, unsigned int buflen)
1547{
33480a0e
TH
1548 struct scatterlist *psg = NULL, sg;
1549 unsigned int n_elem = 0;
2432697b 1550
33480a0e
TH
1551 if (dma_dir != DMA_NONE) {
1552 WARN_ON(!buf);
1553 sg_init_one(&sg, buf, buflen);
1554 psg = &sg;
1555 n_elem++;
1556 }
2432697b 1557
33480a0e 1558 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1559}
1560
977e6b9f
TH
1561/**
1562 * ata_do_simple_cmd - execute simple internal command
1563 * @dev: Device to which the command is sent
1564 * @cmd: Opcode to execute
1565 *
1566 * Execute a 'simple' command, that only consists of the opcode
1567 * 'cmd' itself, without filling any other registers
1568 *
1569 * LOCKING:
1570 * Kernel thread context (may sleep).
1571 *
1572 * RETURNS:
1573 * Zero on success, AC_ERR_* mask on failure
e58eb583 1574 */
77b08fb5 1575unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1576{
1577 struct ata_taskfile tf;
e58eb583
TH
1578
1579 ata_tf_init(dev, &tf);
1580
1581 tf.command = cmd;
1582 tf.flags |= ATA_TFLAG_DEVICE;
1583 tf.protocol = ATA_PROT_NODATA;
1584
977e6b9f 1585 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1586}
1587
1bc4ccff
AC
1588/**
1589 * ata_pio_need_iordy - check if iordy needed
1590 * @adev: ATA device
1591 *
1592 * Check if the current speed of the device requires IORDY. Used
1593 * by various controllers for chip configuration.
1594 */
a617c09f 1595
1bc4ccff
AC
1596unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1597{
432729f0
AC
1598 /* Controller doesn't support IORDY. Probably a pointless check
1599 as the caller should know this */
9af5c9c9 1600 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1601 return 0;
432729f0
AC
1602 /* PIO3 and higher it is mandatory */
1603 if (adev->pio_mode > XFER_PIO_2)
1604 return 1;
1605 /* We turn it on when possible */
1606 if (ata_id_has_iordy(adev->id))
1bc4ccff 1607 return 1;
432729f0
AC
1608 return 0;
1609}
2e9edbf8 1610
432729f0
AC
1611/**
1612 * ata_pio_mask_no_iordy - Return the non IORDY mask
1613 * @adev: ATA device
1614 *
1615 * Compute the highest mode possible if we are not using iordy. Return
1616 * -1 if no iordy mode is available.
1617 */
a617c09f 1618
432729f0
AC
1619static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1620{
1bc4ccff 1621 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1622 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1623 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1624 /* Is the speed faster than the drive allows non IORDY ? */
1625 if (pio) {
1626 /* This is cycle times not frequency - watch the logic! */
1627 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1628 return 3 << ATA_SHIFT_PIO;
1629 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1630 }
1631 }
432729f0 1632 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1633}
1634
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the (assumed) device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2 values 0x37c8/0x738c indicate incomplete IDENTIFY data
	 * from a drive that powered up in standby (see below)
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	/* report the (possibly corrected) class back to the caller */
	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1811
3373efd8 1812static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1813{
9af5c9c9
TH
1814 struct ata_port *ap = dev->link->ap;
1815 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1816}
1817
a6e6ce8e
TH
1818static void ata_dev_config_ncq(struct ata_device *dev,
1819 char *desc, size_t desc_sz)
1820{
9af5c9c9 1821 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1822 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1823
1824 if (!ata_id_has_ncq(dev->id)) {
1825 desc[0] = '\0';
1826 return;
1827 }
75683fe7 1828 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1829 snprintf(desc, desc_sz, "NCQ (not used)");
1830 return;
1831 }
a6e6ce8e 1832 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1833 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1834 dev->flags |= ATA_DFLAG_NCQ;
1835 }
1836
1837 if (hdepth >= ddepth)
1838 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1839 else
1840 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1841}
1842
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 flags a valid multi-sector count in the
		 * low byte
		 */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH_EXT is only worthwhile on large
				 * (>=2^28 sector) drives that support it
				 */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/*
		 * check to see if this ATAPI device supports
		 * Asynchronous Notification
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_AN(id)) {
			int err;
			/* issue SET feature command to turn this on */
			err = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
			if (err)
				ata_dev_printk(dev, KERN_ERR,
						"unable to set AN, err %x\n",
						err);
			else
				dev->flags |= ATA_DFLAG_AN;
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* blacklist entry for devices that lock up beyond 128 sectors */
	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	/* give the LLD a chance to apply controller specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2091
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.  Intended to be assigned to ->cable_detect; @ap is
 *	unused.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40, unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2104
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.  Intended to be assigned to ->cable_detect; @ap is
 *	unused.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80, unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2117
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *	@ap is unused.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK, unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2129
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.  @ap is
 *	unused.
 *
 *	RETURNS:
 *	ATA_CBL_SATA, unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2141
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Each device gets ATA_PROBE_MAX_TRIES attempts; on
 *	certain failures the whole sequence is restarted from the reset.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* every device on the link starts with a full retry budget */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* latch the classes reported by the reset, then mark each
	 * device UNKNOWN until IDENTIFY confirms it below */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have disabled the port; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		/* out of tries: leave class UNKNOWN so the device stays
		 * disabled for the rest of this probe */
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* PRINTINFO makes ata_dev_configure() log the device info */
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived the probe */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device that failed; charge it one try */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2277
2278/**
0cba632b
JG
2279 * ata_port_probe - Mark port as enabled
2280 * @ap: Port for which we indicate enablement
1da177e4 2281 *
0cba632b
JG
2282 * Modify @ap data structure such that the system
2283 * thinks that the entire port is enabled.
2284 *
cca3974e 2285 * LOCKING: host lock, or some other form of
0cba632b 2286 * serialization.
1da177e4
LT
2287 */
2288
2289void ata_port_probe(struct ata_port *ap)
2290{
198e0fed 2291 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2292}
2293
3be680b7
TH
2294/**
2295 * sata_print_link_status - Print SATA link status
936fd732 2296 * @link: SATA link to printk link status about
3be680b7
TH
2297 *
2298 * This function prints link speed and status of a SATA link.
2299 *
2300 * LOCKING:
2301 * None.
2302 */
936fd732 2303void sata_print_link_status(struct ata_link *link)
3be680b7 2304{
6d5f9732 2305 u32 sstatus, scontrol, tmp;
3be680b7 2306
936fd732 2307 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2308 return;
936fd732 2309 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2310
936fd732 2311 if (ata_link_online(link)) {
3be680b7 2312 tmp = (sstatus >> 4) & 0xf;
936fd732 2313 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2314 "SATA link up %s (SStatus %X SControl %X)\n",
2315 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2316 } else {
936fd732 2317 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2318 "SATA link down (SStatus %X SControl %X)\n",
2319 sstatus, scontrol);
3be680b7
TH
2320 }
2321}
2322
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is marked
 *	probed and @ap->cbl is set to ATA_CBL_SATA; on failure the
 *	port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset: SControl DET = 1 (COMRESET) */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset: SControl DET = 0 */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * device presence detected but no communication yet */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device to drop BSY before declaring success */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2378
2379/**
780a87f7
JG
2380 * sata_phy_reset - Reset SATA bus.
2381 * @ap: SATA port associated with target SATA PHY.
1da177e4 2382 *
780a87f7
JG
2383 * This function resets the SATA bus, and then probes
2384 * the bus for devices.
1da177e4
LT
2385 *
2386 * LOCKING:
0cba632b 2387 * PCI/etc. bus probe sem.
1da177e4
LT
2388 *
2389 */
2390void sata_phy_reset(struct ata_port *ap)
2391{
2392 __sata_phy_reset(ap);
198e0fed 2393 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2394 return;
2395 ata_bus_reset(ap);
2396}
2397
ebdfca6e
AC
2398/**
2399 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2400 * @adev: device
2401 *
2402 * Obtain the other device on the same cable, or if none is
2403 * present NULL is returned
2404 */
2e9edbf8 2405
3373efd8 2406struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2407{
9af5c9c9
TH
2408 struct ata_link *link = adev->link;
2409 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2410 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2411 return NULL;
2412 return pair;
2413}
2414
1da177e4 2415/**
780a87f7
JG
2416 * ata_port_disable - Disable port.
2417 * @ap: Port to be disabled.
1da177e4 2418 *
780a87f7
JG
2419 * Modify @ap data structure such that the system
2420 * thinks that the entire port is disabled, and should
2421 * never attempt to probe or communicate with devices
2422 * on this port.
2423 *
cca3974e 2424 * LOCKING: host lock, or some other form of
780a87f7 2425 * serialization.
1da177e4
LT
2426 */
2427
2428void ata_port_disable(struct ata_port *ap)
2429{
9af5c9c9
TH
2430 ap->link.device[0].class = ATA_DEV_NONE;
2431 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2432 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2433}
2434
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().  The limit is a bitmask of allowed
 *	speeds (bit 0 == 1.5Gbps).
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* limit of 1 (or 0) means we are already at the lowest speed */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2493
936fd732 2494static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2495{
2496 u32 spd, limit;
2497
936fd732 2498 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2499 limit = 0;
2500 else
936fd732 2501 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2502
2503 spd = (*scontrol >> 4) & 0xf;
2504 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2505
2506 return spd != limit;
2507}
2508
2509/**
3c567b7d 2510 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2511 * @link: Link in question
1c3fae4d
TH
2512 *
2513 * Test whether the spd limit in SControl matches
936fd732 2514 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2515 * whether hardreset is necessary to apply SATA spd
2516 * configuration.
2517 *
2518 * LOCKING:
2519 * Inherited from caller.
2520 *
2521 * RETURNS:
2522 * 1 if SATA spd configuration is needed, 0 otherwise.
2523 */
936fd732 2524int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2525{
2526 u32 scontrol;
2527
936fd732 2528 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2529 return 0;
2530
936fd732 2531 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2532}
2533
2534/**
3c567b7d 2535 * sata_set_spd - set SATA spd according to spd limit
936fd732 2536 * @link: Link to set SATA spd for
1c3fae4d 2537 *
936fd732 2538 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2539 *
2540 * LOCKING:
2541 * Inherited from caller.
2542 *
2543 * RETURNS:
2544 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2545 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2546 */
936fd732 2547int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2548{
2549 u32 scontrol;
81952c54 2550 int rc;
1c3fae4d 2551
936fd732 2552 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2553 return rc;
1c3fae4d 2554
936fd732 2555 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2556 return 0;
2557
936fd732 2558 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2559 return rc;
2560
1c3fae4d
TH
2561 return 1;
2562}
2563
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
/*	  mode	       setup act8b rec8b cyc8b activ recov cycle  udma */

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	/* sentinel terminating the table; see ata_timing_find_mode() */
	{ 0xFF }
};
2612
/* ENOUGH(): ceiling division -- smallest clock count covering @v ns
 * at a clock period of @unit ns.
 * EZ(): same, but a zero input (field unused) stays zero instead of
 * being rounded up to one.
 */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in @t to bus clock counts in @q, using
 * cycle time T for the PIO/MWDMA fields and UT for the UDMA field.
 * The *1000 scaling implies T/UT are in picoseconds -- TODO confirm
 * against callers.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2627
/* Merge timings @a and @b into @m by taking the field-wise maximum
 * (i.e. the slower, safer value) for every field selected in @what.
 * @m may alias @a or @b; unselected fields of @m are left untouched.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma, b->udma);
}
2640
2641static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2642{
2643 const struct ata_timing *t;
2644
2645 for (t = ata_timing; t->mode != speed; t++)
91190758 2646 if (t->mode == 0xFF)
452503f9 2647 return NULL;
2e9edbf8 2648 return t;
452503f9
AC
2649}
2650
/* Compute the quantized bus timing for @adev at transfer mode @speed
 * into @t, honouring any extended cycle timings the (EIDE) drive
 * reports in its IDENTIFY data.  T/UT are the bus clock periods used
 * for quantization (see ata_timing_quantize()).
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* PIO0-2 use the plain minimum; faster PIO modes use
			 * the IORDY-qualified minimum from IDENTIFY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once for the device's current PIO mode and take
		 * the slower of the two timings */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2721
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.  ATA_DNXFER_QUIET may be OR'd into @sel
 *	to suppress the warning printk.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the fastest remaining DMA mode, preferring
		 * UDMA over MWDMA; refuse to drop to no DMA at all */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to speeds safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode and must actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2810
/* Program @dev with its selected transfer mode (dev->xfer_mode /
 * dev->xfer_shift) via SET FEATURES - XFER MODE, then revalidate the
 * device.  Known device quirks that make the command fail harmlessly
 * are filtered out of the error mask before it is judged.
 * Returns 0 on success, -EIO if the device rejected the command,
 * or the error from revalidation.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	/* PIO flag tracks whether the final mode is a PIO mode */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* tell EH that the revalidation follows a mode change */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2849
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out paramter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.  Proceeds in four steps: compute
 *	masks, program host PIO timings, program host DMA timings,
 *	then issue SET FEATURES to each device.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the fastest mode allowed in each category */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* a device with no PIO mode at all is unusable */
		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2944
04351821
A
2945/**
2946 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2947 * @link: link on which timings will be programmed
04351821
A
2948 * @r_failed_dev: out paramter for failed device
2949 *
2950 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2951 * ata_set_mode() fails, pointer to the failing device is
2952 * returned in @r_failed_dev.
2953 *
2954 * LOCKING:
2955 * PCI/etc. bus probe sem.
2956 *
2957 * RETURNS:
2958 * 0 on success, negative errno otherwise
2959 */
0260731f 2960int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2961{
0260731f
TH
2962 struct ata_port *ap = link->ap;
2963
04351821
A
2964 /* has private set_mode? */
2965 if (ap->ops->set_mode)
0260731f
TH
2966 return ap->ops->set_mode(link, r_failed_dev);
2967 return ata_do_set_mode(link, r_failed_dev);
04351821
A
2968}
2969
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.  The registers must be loaded before the
 *	command register is written, hence the fixed call order.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);	/* load taskfile registers first */
	ap->ops->exec_command(ap, tf);	/* then kick off the command */
}
2989
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  After @tmout_pat a "be patient"
 *	warning is printed and polling continues until @tmout.
 *	A status of 0xff is treated as "no device" (floating bus).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* first phase: poll until BSY clears or the patience timeout */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* second phase: keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	/* 0xff means nothing is driving the bus -- no device */
	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3044
d4b2bab4
TH
3045/**
3046 * ata_wait_ready - sleep until BSY clears, or timeout
3047 * @ap: port containing status register to be polled
3048 * @deadline: deadline jiffies for the operation
3049 *
3050 * Sleep until ATA Status register bit BSY clears, or timeout
3051 * occurs.
3052 *
3053 * LOCKING:
3054 * Kernel thread context (may sleep).
3055 *
3056 * RETURNS:
3057 * 0 on success, -errno otherwise.
3058 */
3059int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3060{
3061 unsigned long start = jiffies;
3062 int warned = 0;
3063
3064 while (1) {
3065 u8 status = ata_chk_status(ap);
3066 unsigned long now = jiffies;
3067
3068 if (!(status & ATA_BUSY))
3069 return 0;
936fd732 3070 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3071 return -ENODEV;
3072 if (time_after(now, deadline))
3073 return -EBUSY;
3074
3075 if (!warned && time_after(now, start + 5 * HZ) &&
3076 (deadline - now > 3 * HZ)) {
3077 ata_port_printk(ap, KERN_WARNING,
3078 "port is slow to respond, please be patient "
3079 "(Status 0x%x)\n", status);
3080 warned = 1;
3081 }
3082
3083 msleep(50);
3084 }
3085}
3086
3087static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3088 unsigned long deadline)
1da177e4
LT
3089{
3090 struct ata_ioports *ioaddr = &ap->ioaddr;
3091 unsigned int dev0 = devmask & (1 << 0);
3092 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3093 int rc, ret = 0;
1da177e4
LT
3094
3095 /* if device 0 was found in ata_devchk, wait for its
3096 * BSY bit to clear
3097 */
d4b2bab4
TH
3098 if (dev0) {
3099 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3100 if (rc) {
3101 if (rc != -ENODEV)
3102 return rc;
3103 ret = rc;
3104 }
d4b2bab4 3105 }
1da177e4 3106
e141d999
TH
3107 /* if device 1 was found in ata_devchk, wait for register
3108 * access briefly, then wait for BSY to clear.
1da177e4 3109 */
e141d999
TH
3110 if (dev1) {
3111 int i;
1da177e4
LT
3112
3113 ap->ops->dev_select(ap, 1);
e141d999
TH
3114
3115 /* Wait for register access. Some ATAPI devices fail
3116 * to set nsect/lbal after reset, so don't waste too
3117 * much time on it. We're gonna wait for !BSY anyway.
3118 */
3119 for (i = 0; i < 2; i++) {
3120 u8 nsect, lbal;
3121
3122 nsect = ioread8(ioaddr->nsect_addr);
3123 lbal = ioread8(ioaddr->lbal_addr);
3124 if ((nsect == 1) && (lbal == 1))
3125 break;
3126 msleep(50); /* give drive a breather */
3127 }
3128
d4b2bab4 3129 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3130 if (rc) {
3131 if (rc != -ENODEV)
3132 return rc;
3133 ret = rc;
3134 }
d4b2bab4 3135 }
1da177e4
LT
3136
3137 /* is all this really necessary? */
3138 ap->ops->dev_select(ap, 0);
3139 if (dev1)
3140 ap->ops->dev_select(ap, 1);
3141 if (dev0)
3142 ap->ops->dev_select(ap, 0);
d4b2bab4 3143
9b89391c 3144 return ret;
1da177e4
LT
3145}
3146
d4b2bab4
TH
3147static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3148 unsigned long deadline)
1da177e4
LT
3149{
3150 struct ata_ioports *ioaddr = &ap->ioaddr;
3151
44877b4e 3152 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3153
3154 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3155 iowrite8(ap->ctl, ioaddr->ctl_addr);
3156 udelay(20); /* FIXME: flush */
3157 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3158 udelay(20); /* FIXME: flush */
3159 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3160
3161 /* spec mandates ">= 2ms" before checking status.
3162 * We wait 150ms, because that was the magic delay used for
3163 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3164 * between when the ATA command register is written, and then
3165 * status is checked. Because waiting for "a while" before
3166 * checking status is fine, post SRST, we perform this magic
3167 * delay here as well.
09c7ad79
AC
3168 *
3169 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3170 */
3171 msleep(150);
3172
2e9edbf8 3173 /* Before we perform post reset processing we want to see if
298a41ca
TH
3174 * the bus shows 0xFF because the odd clown forgets the D7
3175 * pulldown resistor.
3176 */
d1adc1bb 3177 if (ata_check_status(ap) == 0xFF)
9b89391c 3178 return -ENODEV;
09c7ad79 3179
d4b2bab4 3180 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3181}
3182
3183/**
3184 * ata_bus_reset - reset host port and associated ATA channel
3185 * @ap: port to reset
3186 *
3187 * This is typically the first time we actually start issuing
3188 * commands to the ATA channel. We wait for BSY to clear, then
3189 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3190 * result. Determine what devices, if any, are on the channel
3191 * by looking at the device 0/1 error register. Look at the signature
3192 * stored in each device's taskfile registers, to determine if
3193 * the device is ATA or ATAPI.
3194 *
3195 * LOCKING:
0cba632b 3196 * PCI/etc. bus probe sem.
cca3974e 3197 * Obtains host lock.
1da177e4
LT
3198 *
3199 * SIDE EFFECTS:
198e0fed 3200 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3201 */
3202
3203void ata_bus_reset(struct ata_port *ap)
3204{
9af5c9c9 3205 struct ata_device *device = ap->link.device;
1da177e4
LT
3206 struct ata_ioports *ioaddr = &ap->ioaddr;
3207 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3208 u8 err;
aec5c3c1 3209 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3210 int rc;
1da177e4 3211
44877b4e 3212 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3213
3214 /* determine if device 0/1 are present */
3215 if (ap->flags & ATA_FLAG_SATA_RESET)
3216 dev0 = 1;
3217 else {
3218 dev0 = ata_devchk(ap, 0);
3219 if (slave_possible)
3220 dev1 = ata_devchk(ap, 1);
3221 }
3222
3223 if (dev0)
3224 devmask |= (1 << 0);
3225 if (dev1)
3226 devmask |= (1 << 1);
3227
3228 /* select device 0 again */
3229 ap->ops->dev_select(ap, 0);
3230
3231 /* issue bus reset */
9b89391c
TH
3232 if (ap->flags & ATA_FLAG_SRST) {
3233 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3234 if (rc && rc != -ENODEV)
aec5c3c1 3235 goto err_out;
9b89391c 3236 }
1da177e4
LT
3237
3238 /*
3239 * determine by signature whether we have ATA or ATAPI devices
3240 */
9af5c9c9 3241 device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 3242 if ((slave_possible) && (err != 0x81))
9af5c9c9 3243 device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4 3244
1da177e4 3245 /* is double-select really necessary? */
9af5c9c9 3246 if (device[1].class != ATA_DEV_NONE)
1da177e4 3247 ap->ops->dev_select(ap, 1);
9af5c9c9 3248 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3249 ap->ops->dev_select(ap, 0);
3250
3251 /* if no devices were detected, disable this port */
9af5c9c9
TH
3252 if ((device[0].class == ATA_DEV_NONE) &&
3253 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3254 goto err_out;
3255
3256 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3257 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3258 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3259 }
3260
3261 DPRINTK("EXIT\n");
3262 return;
3263
3264err_out:
f15a1daf 3265 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3266 ata_port_disable(ap);
1da177e4
LT
3267
3268 DPRINTK("EXIT\n");
3269}
3270
d7bb4cc7 3271/**
936fd732
TH
3272 * sata_link_debounce - debounce SATA phy status
3273 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3274 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3275 * @deadline: deadline jiffies for the operation
d7bb4cc7 3276 *
936fd732 3277* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3278 * holding the same value where DET is not 1 for @duration polled
3279 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3280 * beginning of the stable state. Because DET gets stuck at 1 on
3281 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3282 * until timeout then returns 0 if DET is stable at 1.
3283 *
d4b2bab4
TH
3284 * @timeout is further limited by @deadline. The sooner of the
3285 * two is used.
3286 *
d7bb4cc7
TH
3287 * LOCKING:
3288 * Kernel thread context (may sleep)
3289 *
3290 * RETURNS:
3291 * 0 on success, -errno on failure.
3292 */
936fd732
TH
3293int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3294 unsigned long deadline)
7a7921e8 3295{
d7bb4cc7 3296 unsigned long interval_msec = params[0];
d4b2bab4
TH
3297 unsigned long duration = msecs_to_jiffies(params[1]);
3298 unsigned long last_jiffies, t;
d7bb4cc7
TH
3299 u32 last, cur;
3300 int rc;
3301
d4b2bab4
TH
3302 t = jiffies + msecs_to_jiffies(params[2]);
3303 if (time_before(t, deadline))
3304 deadline = t;
3305
936fd732 3306 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3307 return rc;
3308 cur &= 0xf;
3309
3310 last = cur;
3311 last_jiffies = jiffies;
3312
3313 while (1) {
3314 msleep(interval_msec);
936fd732 3315 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3316 return rc;
3317 cur &= 0xf;
3318
3319 /* DET stable? */
3320 if (cur == last) {
d4b2bab4 3321 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3322 continue;
3323 if (time_after(jiffies, last_jiffies + duration))
3324 return 0;
3325 continue;
3326 }
3327
3328 /* unstable, start over */
3329 last = cur;
3330 last_jiffies = jiffies;
3331
f1545154
TH
3332 /* Check deadline. If debouncing failed, return
3333 * -EPIPE to tell upper layer to lower link speed.
3334 */
d4b2bab4 3335 if (time_after(jiffies, deadline))
f1545154 3336 return -EPIPE;
d7bb4cc7
TH
3337 }
3338}
3339
3340/**
936fd732
TH
3341 * sata_link_resume - resume SATA link
3342 * @link: ATA link to resume SATA
d7bb4cc7 3343 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3344 * @deadline: deadline jiffies for the operation
d7bb4cc7 3345 *
936fd732 3346 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3347 *
3348 * LOCKING:
3349 * Kernel thread context (may sleep)
3350 *
3351 * RETURNS:
3352 * 0 on success, -errno on failure.
3353 */
936fd732
TH
3354int sata_link_resume(struct ata_link *link, const unsigned long *params,
3355 unsigned long deadline)
d7bb4cc7
TH
3356{
3357 u32 scontrol;
81952c54
TH
3358 int rc;
3359
936fd732 3360 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3361 return rc;
7a7921e8 3362
852ee16a 3363 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3364
936fd732 3365 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3366 return rc;
7a7921e8 3367
d7bb4cc7
TH
3368 /* Some PHYs react badly if SStatus is pounded immediately
3369 * after resuming. Delay 200ms before debouncing.
3370 */
3371 msleep(200);
7a7921e8 3372
936fd732 3373 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3374}
3375
f5914a46
TH
3376/**
3377 * ata_std_prereset - prepare for reset
cc0680a5 3378 * @link: ATA link to be reset
d4b2bab4 3379 * @deadline: deadline jiffies for the operation
f5914a46 3380 *
cc0680a5 3381 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3382 * prereset makes libata abort whole reset sequence and give up
3383 * that port, so prereset should be best-effort. It does its
3384 * best to prepare for reset sequence but if things go wrong, it
3385 * should just whine, not fail.
f5914a46
TH
3386 *
3387 * LOCKING:
3388 * Kernel thread context (may sleep)
3389 *
3390 * RETURNS:
3391 * 0 on success, -errno otherwise.
3392 */
cc0680a5 3393int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3394{
cc0680a5 3395 struct ata_port *ap = link->ap;
936fd732 3396 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3397 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3398 int rc;
3399
31daabda 3400 /* handle link resume */
28324304 3401 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3402 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3403 ehc->i.action |= ATA_EH_HARDRESET;
3404
f5914a46
TH
3405 /* if we're about to do hardreset, nothing more to do */
3406 if (ehc->i.action & ATA_EH_HARDRESET)
3407 return 0;
3408
936fd732 3409 /* if SATA, resume link */
a16abc0b 3410 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3411 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3412 /* whine about phy resume failure but proceed */
3413 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3414 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3415 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3416 }
3417
3418 /* Wait for !BSY if the controller can wait for the first D2H
3419 * Reg FIS and we don't know that no device is attached.
3420 */
0c88758b 3421 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3422 rc = ata_wait_ready(ap, deadline);
6dffaf61 3423 if (rc && rc != -ENODEV) {
cc0680a5 3424 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3425 "(errno=%d), forcing hardreset\n", rc);
3426 ehc->i.action |= ATA_EH_HARDRESET;
3427 }
3428 }
f5914a46
TH
3429
3430 return 0;
3431}
3432
c2bd5804
TH
3433/**
3434 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3435 * @link: ATA link to reset
c2bd5804 3436 * @classes: resulting classes of attached devices
d4b2bab4 3437 * @deadline: deadline jiffies for the operation
c2bd5804 3438 *
52783c5d 3439 * Reset host port using ATA SRST.
c2bd5804
TH
3440 *
3441 * LOCKING:
3442 * Kernel thread context (may sleep)
3443 *
3444 * RETURNS:
3445 * 0 on success, -errno otherwise.
3446 */
cc0680a5 3447int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3448 unsigned long deadline)
c2bd5804 3449{
cc0680a5 3450 struct ata_port *ap = link->ap;
c2bd5804 3451 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3452 unsigned int devmask = 0;
3453 int rc;
c2bd5804
TH
3454 u8 err;
3455
3456 DPRINTK("ENTER\n");
3457
936fd732 3458 if (ata_link_offline(link)) {
3a39746a
TH
3459 classes[0] = ATA_DEV_NONE;
3460 goto out;
3461 }
3462
c2bd5804
TH
3463 /* determine if device 0/1 are present */
3464 if (ata_devchk(ap, 0))
3465 devmask |= (1 << 0);
3466 if (slave_possible && ata_devchk(ap, 1))
3467 devmask |= (1 << 1);
3468
c2bd5804
TH
3469 /* select device 0 again */
3470 ap->ops->dev_select(ap, 0);
3471
3472 /* issue bus reset */
3473 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3474 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3475 /* if link is occupied, -ENODEV too is an error */
936fd732 3476 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3477 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3478 return rc;
c2bd5804
TH
3479 }
3480
3481 /* determine by signature whether we have ATA or ATAPI devices */
3482 classes[0] = ata_dev_try_classify(ap, 0, &err);
3483 if (slave_possible && err != 0x81)
3484 classes[1] = ata_dev_try_classify(ap, 1, &err);
3485
3a39746a 3486 out:
c2bd5804
TH
3487 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3488 return 0;
3489}
3490
3491/**
cc0680a5
TH
3492 * sata_link_hardreset - reset link via SATA phy reset
3493 * @link: link to reset
b6103f6d 3494 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3495 * @deadline: deadline jiffies for the operation
c2bd5804 3496 *
cc0680a5 3497 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3498 *
3499 * LOCKING:
3500 * Kernel thread context (may sleep)
3501 *
3502 * RETURNS:
3503 * 0 on success, -errno otherwise.
3504 */
cc0680a5 3505int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3506 unsigned long deadline)
c2bd5804 3507{
852ee16a 3508 u32 scontrol;
81952c54 3509 int rc;
852ee16a 3510
c2bd5804
TH
3511 DPRINTK("ENTER\n");
3512
936fd732 3513 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3514 /* SATA spec says nothing about how to reconfigure
3515 * spd. To be on the safe side, turn off phy during
3516 * reconfiguration. This works for at least ICH7 AHCI
3517 * and Sil3124.
3518 */
936fd732 3519 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3520 goto out;
81952c54 3521
a34b6fc0 3522 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3523
936fd732 3524 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3525 goto out;
1c3fae4d 3526
936fd732 3527 sata_set_spd(link);
1c3fae4d
TH
3528 }
3529
3530 /* issue phy wake/reset */
936fd732 3531 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3532 goto out;
81952c54 3533
852ee16a 3534 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3535
936fd732 3536 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3537 goto out;
c2bd5804 3538
1c3fae4d 3539 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3540 * 10.4.2 says at least 1 ms.
3541 */
3542 msleep(1);
3543
936fd732
TH
3544 /* bring link back */
3545 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3546 out:
3547 DPRINTK("EXIT, rc=%d\n", rc);
3548 return rc;
3549}
3550
3551/**
3552 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3553 * @link: link to reset
b6103f6d 3554 * @class: resulting class of attached device
d4b2bab4 3555 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3556 *
3557 * SATA phy-reset host port using DET bits of SControl register,
3558 * wait for !BSY and classify the attached device.
3559 *
3560 * LOCKING:
3561 * Kernel thread context (may sleep)
3562 *
3563 * RETURNS:
3564 * 0 on success, -errno otherwise.
3565 */
cc0680a5 3566int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3567 unsigned long deadline)
b6103f6d 3568{
cc0680a5 3569 struct ata_port *ap = link->ap;
936fd732 3570 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3571 int rc;
3572
3573 DPRINTK("ENTER\n");
3574
3575 /* do hardreset */
cc0680a5 3576 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3577 if (rc) {
cc0680a5 3578 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3579 "COMRESET failed (errno=%d)\n", rc);
3580 return rc;
3581 }
c2bd5804 3582
c2bd5804 3583 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3584 if (ata_link_offline(link)) {
c2bd5804
TH
3585 *class = ATA_DEV_NONE;
3586 DPRINTK("EXIT, link offline\n");
3587 return 0;
3588 }
3589
34fee227
TH
3590 /* wait a while before checking status, see SRST for more info */
3591 msleep(150);
3592
d4b2bab4 3593 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3594 /* link occupied, -ENODEV too is an error */
3595 if (rc) {
cc0680a5 3596 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3597 "COMRESET failed (errno=%d)\n", rc);
3598 return rc;
c2bd5804
TH
3599 }
3600
3a39746a
TH
3601 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3602
c2bd5804
TH
3603 *class = ata_dev_try_classify(ap, 0, NULL);
3604
3605 DPRINTK("EXIT, class=%u\n", *class);
3606 return 0;
3607}
3608
3609/**
3610 * ata_std_postreset - standard postreset callback
cc0680a5 3611 * @link: the target ata_link
c2bd5804
TH
3612 * @classes: classes of attached devices
3613 *
3614 * This function is invoked after a successful reset. Note that
3615 * the device might have been reset more than once using
3616 * different reset methods before postreset is invoked.
c2bd5804 3617 *
c2bd5804
TH
3618 * LOCKING:
3619 * Kernel thread context (may sleep)
3620 */
cc0680a5 3621void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3622{
cc0680a5 3623 struct ata_port *ap = link->ap;
dc2b3515
TH
3624 u32 serror;
3625
c2bd5804
TH
3626 DPRINTK("ENTER\n");
3627
c2bd5804 3628 /* print link status */
936fd732 3629 sata_print_link_status(link);
c2bd5804 3630
dc2b3515 3631 /* clear SError */
936fd732
TH
3632 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3633 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3634
c2bd5804
TH
3635 /* is double-select really necessary? */
3636 if (classes[0] != ATA_DEV_NONE)
3637 ap->ops->dev_select(ap, 1);
3638 if (classes[1] != ATA_DEV_NONE)
3639 ap->ops->dev_select(ap, 0);
3640
3a39746a
TH
3641 /* bail out if no device is present */
3642 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3643 DPRINTK("EXIT, no device\n");
3644 return;
3645 }
3646
3647 /* set up device control */
0d5ff566
TH
3648 if (ap->ioaddr.ctl_addr)
3649 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3650
3651 DPRINTK("EXIT\n");
3652}
3653
623a3128
TH
3654/**
3655 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3656 * @dev: device to compare against
3657 * @new_class: class of the new device
3658 * @new_id: IDENTIFY page of the new device
3659 *
3660 * Compare @new_class and @new_id against @dev and determine
3661 * whether @dev is the device indicated by @new_class and
3662 * @new_id.
3663 *
3664 * LOCKING:
3665 * None.
3666 *
3667 * RETURNS:
3668 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3669 */
3373efd8
TH
3670static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3671 const u16 *new_id)
623a3128
TH
3672{
3673 const u16 *old_id = dev->id;
a0cf733b
TH
3674 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3675 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3676
3677 if (dev->class != new_class) {
f15a1daf
TH
3678 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3679 dev->class, new_class);
623a3128
TH
3680 return 0;
3681 }
3682
a0cf733b
TH
3683 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3684 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3685 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3686 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3687
3688 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3689 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3690 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3691 return 0;
3692 }
3693
3694 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3695 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3696 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3697 return 0;
3698 }
3699
623a3128
TH
3700 return 1;
3701}
3702
3703/**
fe30911b 3704 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3705 * @dev: target ATA device
bff04647 3706 * @readid_flags: read ID flags
623a3128
TH
3707 *
3708 * Re-read IDENTIFY page and make sure @dev is still attached to
3709 * the port.
3710 *
3711 * LOCKING:
3712 * Kernel thread context (may sleep)
3713 *
3714 * RETURNS:
3715 * 0 on success, negative errno otherwise
3716 */
fe30911b 3717int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3718{
5eb45c02 3719 unsigned int class = dev->class;
9af5c9c9 3720 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3721 int rc;
3722
fe635c7e 3723 /* read ID data */
bff04647 3724 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3725 if (rc)
fe30911b 3726 return rc;
623a3128
TH
3727
3728 /* is the device still there? */
fe30911b
TH
3729 if (!ata_dev_same_device(dev, class, id))
3730 return -ENODEV;
623a3128 3731
fe635c7e 3732 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3733 return 0;
3734}
3735
3736/**
3737 * ata_dev_revalidate - Revalidate ATA device
3738 * @dev: device to revalidate
3739 * @readid_flags: read ID flags
3740 *
3741 * Re-read IDENTIFY page, make sure @dev is still attached to the
3742 * port and reconfigure it according to the new IDENTIFY page.
3743 *
3744 * LOCKING:
3745 * Kernel thread context (may sleep)
3746 *
3747 * RETURNS:
3748 * 0 on success, negative errno otherwise
3749 */
3750int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3751{
6ddcd3b0 3752 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3753 int rc;
3754
3755 if (!ata_dev_enabled(dev))
3756 return -ENODEV;
3757
3758 /* re-read ID */
3759 rc = ata_dev_reread_id(dev, readid_flags);
3760 if (rc)
3761 goto fail;
623a3128
TH
3762
3763 /* configure device according to the new ID */
efdaedc4 3764 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3765 if (rc)
3766 goto fail;
3767
3768 /* verify n_sectors hasn't changed */
b54eebd6
TH
3769 if (dev->class == ATA_DEV_ATA && n_sectors &&
3770 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3771 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3772 "%llu != %llu\n",
3773 (unsigned long long)n_sectors,
3774 (unsigned long long)dev->n_sectors);
8270bec4
TH
3775
3776 /* restore original n_sectors */
3777 dev->n_sectors = n_sectors;
3778
6ddcd3b0
TH
3779 rc = -ENODEV;
3780 goto fail;
3781 }
3782
3783 return 0;
623a3128
TH
3784
3785 fail:
f15a1daf 3786 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3787 return rc;
3788}
3789
6919a0a6
AC
3790struct ata_blacklist_entry {
3791 const char *model_num;
3792 const char *model_rev;
3793 unsigned long horkage;
3794};
3795
3796static const struct ata_blacklist_entry ata_device_blacklist [] = {
3797 /* Devices with DMA related problems under Linux */
3798 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3799 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3800 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3801 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3802 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3803 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3804 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3805 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3806 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3807 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3808 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3809 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3810 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3811 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3812 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3813 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3814 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3815 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3816 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3817 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3818 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3819 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3820 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3821 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3822 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3823 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3824 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3825 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3826 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3827 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3828 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3829 { "IOMEGA ZIP 250 ATAPI Floppy",
3830 NULL, ATA_HORKAGE_NODMA },
6919a0a6 3831
18d6e9d5 3832 /* Weird ATAPI devices */
40a1d531 3833 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3834
6919a0a6
AC
3835 /* Devices we expect to fail diagnostics */
3836
3837 /* Devices where NCQ should be avoided */
3838 /* NCQ is slow */
3839 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3840 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3841 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3842 /* NCQ is broken */
539cc7c7 3843 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3844 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
2f8d90ab 3845 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
539cc7c7
JG
3846 ATA_HORKAGE_NONCQ },
3847
36e337d0
RH
3848 /* Blacklist entries taken from Silicon Image 3124/3132
3849 Windows driver .inf file - also several Linux problem reports */
3850 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3851 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3852 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3853 /* Drives which do spurious command completion */
3854 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3855 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
e14cbfa6 3856 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3857 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
a520f261 3858 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3fb6589c 3859 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
0e3dbc01 3860 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
5d6aca8d 3861 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3862
16c55b03
TH
3863 /* devices which puke on READ_NATIVE_MAX */
3864 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3865 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3866 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3867 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6
AC
3868
3869 /* End Marker */
3870 { }
1da177e4 3871};
2e9edbf8 3872
/* Compare @name against pattern @patt.  A single trailing @wildchar
 * (typically '*') limits the comparison to the characters before it.
 * Returns 0 on a match, non-zero otherwise (strncmp() convention).
 *
 * NOTE(review): without a wildcard the comparison length is
 * strlen(name), not strlen(patt) — a pattern of which @name is a
 * prefix therefore also matches.  Existing blacklist entries may rely
 * on this, so the behavior is preserved here.
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild;
	int len;

	/* check for trailing wildcard: *\0 */
	wild = strchr(patt, wildchar);
	if (wild && wild[1] == '\0')
		len = wild - patt;
	else
		len = strlen(name);

	return strncmp(patt, name, len);
}
3889
75683fe7 3890static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3891{
8bfa79fc
TH
3892 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3893 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3894 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3895
8bfa79fc
TH
3896 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3897 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3898
6919a0a6 3899 while (ad->model_num) {
539cc7c7 3900 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3901 if (ad->model_rev == NULL)
3902 return ad->horkage;
539cc7c7 3903 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 3904 return ad->horkage;
f4b15fef 3905 }
6919a0a6 3906 ad++;
f4b15fef 3907 }
1da177e4
LT
3908 return 0;
3909}
3910
6919a0a6
AC
3911static int ata_dma_blacklisted(const struct ata_device *dev)
3912{
3913 /* We don't support polling DMA.
3914 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3915 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3916 */
9af5c9c9 3917 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3918 (dev->flags & ATA_DFLAG_CDB_INTR))
3919 return 1;
75683fe7 3920 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3921}
3922
a6d5a51c
TH
3923/**
3924 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3925 * @dev: Device to compute xfermask for
3926 *
acf356b1
TH
3927 * Compute supported xfermask of @dev and store it in
3928 * dev->*_mask. This function is responsible for applying all
3929 * known limits including host controller limits, device
3930 * blacklist, etc...
a6d5a51c
TH
3931 *
3932 * LOCKING:
3933 * None.
a6d5a51c 3934 */
3373efd8 3935static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3936{
9af5c9c9
TH
3937 struct ata_link *link = dev->link;
3938 struct ata_port *ap = link->ap;
cca3974e 3939 struct ata_host *host = ap->host;
a6d5a51c 3940 unsigned long xfer_mask;
1da177e4 3941
37deecb5 3942 /* controller modes available */
565083e1
TH
3943 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3944 ap->mwdma_mask, ap->udma_mask);
3945
8343f889 3946 /* drive modes available */
37deecb5
TH
3947 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3948 dev->mwdma_mask, dev->udma_mask);
3949 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3950
b352e57d
AC
3951 /*
3952 * CFA Advanced TrueIDE timings are not allowed on a shared
3953 * cable
3954 */
3955 if (ata_dev_pair(dev)) {
3956 /* No PIO5 or PIO6 */
3957 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3958 /* No MWDMA3 or MWDMA 4 */
3959 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3960 }
3961
37deecb5
TH
3962 if (ata_dma_blacklisted(dev)) {
3963 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3964 ata_dev_printk(dev, KERN_WARNING,
3965 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3966 }
a6d5a51c 3967
14d66ab7
PV
3968 if ((host->flags & ATA_HOST_SIMPLEX) &&
3969 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3970 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3971 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3972 "other device, disabling DMA\n");
5444a6f4 3973 }
565083e1 3974
e424675f
JG
3975 if (ap->flags & ATA_FLAG_NO_IORDY)
3976 xfer_mask &= ata_pio_mask_no_iordy(dev);
3977
5444a6f4 3978 if (ap->ops->mode_filter)
a76b62ca 3979 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3980
8343f889
RH
3981 /* Apply cable rule here. Don't apply it early because when
3982 * we handle hot plug the cable type can itself change.
3983 * Check this last so that we know if the transfer rate was
3984 * solely limited by the cable.
3985 * Unknown or 80 wire cables reported host side are checked
3986 * drive side as well. Cases where we know a 40wire cable
3987 * is used safely for 80 are not checked here.
3988 */
3989 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3990 /* UDMA/44 or higher would be available */
3991 if((ap->cbl == ATA_CBL_PATA40) ||
3992 (ata_drive_40wire(dev->id) &&
3993 (ap->cbl == ATA_CBL_PATA_UNK ||
3994 ap->cbl == ATA_CBL_PATA80))) {
3995 ata_dev_printk(dev, KERN_WARNING,
3996 "limited to UDMA/33 due to 40-wire cable\n");
3997 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3998 }
3999
565083e1
TH
4000 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4001 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4002}
4003
1da177e4
LT
4004/**
4005 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4006 * @dev: Device to which command will be sent
4007 *
780a87f7
JG
4008 * Issue SET FEATURES - XFER MODE command to device @dev
4009 * on port @ap.
4010 *
1da177e4 4011 * LOCKING:
0cba632b 4012 * PCI/etc. bus probe sem.
83206a29
TH
4013 *
4014 * RETURNS:
4015 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4016 */
4017
3373efd8 4018static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4019{
a0123703 4020 struct ata_taskfile tf;
83206a29 4021 unsigned int err_mask;
1da177e4
LT
4022
4023 /* set up set-features taskfile */
4024 DPRINTK("set features - xfer mode\n");
4025
464cf177
TH
4026 /* Some controllers and ATAPI devices show flaky interrupt
4027 * behavior after setting xfer mode. Use polling instead.
4028 */
3373efd8 4029 ata_tf_init(dev, &tf);
a0123703
TH
4030 tf.command = ATA_CMD_SET_FEATURES;
4031 tf.feature = SETFEATURES_XFER;
464cf177 4032 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4033 tf.protocol = ATA_PROT_NODATA;
4034 tf.nsect = dev->xfer_mode;
1da177e4 4035
3373efd8 4036 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4037
4038 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4039 return err_mask;
4040}
4041
4042/**
4043 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4044 * @dev: Device to which command will be sent
4045 * @enable: Whether to enable or disable the feature
4046 *
4047 * Issue SET FEATURES - SATA FEATURES command to device @dev
4048 * on port @ap with sector count set to indicate Asynchronous
4049 * Notification feature
4050 *
4051 * LOCKING:
4052 * PCI/etc. bus probe sem.
4053 *
4054 * RETURNS:
4055 * 0 on success, AC_ERR_* mask otherwise.
4056 */
4057static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4058{
4059 struct ata_taskfile tf;
4060 unsigned int err_mask;
4061
4062 /* set up set-features taskfile */
4063 DPRINTK("set features - SATA features\n");
4064
4065 ata_tf_init(dev, &tf);
4066 tf.command = ATA_CMD_SET_FEATURES;
4067 tf.feature = enable;
4068 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4069 tf.protocol = ATA_PROT_NODATA;
4070 tf.nsect = SATA_AN;
4071
4072 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4073
83206a29
TH
4074 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4075 return err_mask;
1da177e4
LT
4076}
4077
8bf62ece
AL
4078/**
4079 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4080 * @dev: Device to which command will be sent
e2a7f77a
RD
4081 * @heads: Number of heads (taskfile parameter)
4082 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4083 *
4084 * LOCKING:
6aff8f1f
TH
4085 * Kernel thread context (may sleep)
4086 *
4087 * RETURNS:
4088 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4089 */
3373efd8
TH
4090static unsigned int ata_dev_init_params(struct ata_device *dev,
4091 u16 heads, u16 sectors)
8bf62ece 4092{
a0123703 4093 struct ata_taskfile tf;
6aff8f1f 4094 unsigned int err_mask;
8bf62ece
AL
4095
4096 /* Number of sectors per track 1-255. Number of heads 1-16 */
4097 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4098 return AC_ERR_INVALID;
8bf62ece
AL
4099
4100 /* set up init dev params taskfile */
4101 DPRINTK("init dev params \n");
4102
3373efd8 4103 ata_tf_init(dev, &tf);
a0123703
TH
4104 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4105 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4106 tf.protocol = ATA_PROT_NODATA;
4107 tf.nsect = sectors;
4108 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4109
3373efd8 4110 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4111 /* A clean abort indicates an original or just out of spec drive
4112 and we should continue as we issue the setup based on the
4113 drive reported working geometry */
4114 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4115 err_mask = 0;
8bf62ece 4116
6aff8f1f
TH
4117 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4118 return err_mask;
8bf62ece
AL
4119}
4120
1da177e4 4121/**
0cba632b
JG
4122 * ata_sg_clean - Unmap DMA memory associated with command
4123 * @qc: Command containing DMA memory to be released
4124 *
4125 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4126 *
4127 * LOCKING:
cca3974e 4128 * spin_lock_irqsave(host lock)
1da177e4 4129 */
70e6ad0c 4130void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4131{
4132 struct ata_port *ap = qc->ap;
cedc9a47 4133 struct scatterlist *sg = qc->__sg;
1da177e4 4134 int dir = qc->dma_dir;
cedc9a47 4135 void *pad_buf = NULL;
1da177e4 4136
a4631474
TH
4137 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4138 WARN_ON(sg == NULL);
1da177e4
LT
4139
4140 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4141 WARN_ON(qc->n_elem > 1);
1da177e4 4142
2c13b7ce 4143 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4144
cedc9a47
JG
4145 /* if we padded the buffer out to 32-bit bound, and data
4146 * xfer direction is from-device, we must copy from the
4147 * pad buffer back into the supplied buffer
4148 */
4149 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4150 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4151
4152 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4153 if (qc->n_elem)
2f1f610b 4154 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4155 /* restore last sg */
4156 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4157 if (pad_buf) {
4158 struct scatterlist *psg = &qc->pad_sgent;
4159 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4160 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4161 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4162 }
4163 } else {
2e242fa9 4164 if (qc->n_elem)
2f1f610b 4165 dma_unmap_single(ap->dev,
e1410f2d
JG
4166 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4167 dir);
cedc9a47
JG
4168 /* restore sg */
4169 sg->length += qc->pad_len;
4170 if (pad_buf)
4171 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4172 pad_buf, qc->pad_len);
4173 }
1da177e4
LT
4174
4175 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4176 qc->__sg = NULL;
1da177e4
LT
4177}
4178
4179/**
4180 * ata_fill_sg - Fill PCI IDE PRD table
4181 * @qc: Metadata associated with taskfile to be transferred
4182 *
780a87f7
JG
4183 * Fill PCI IDE PRD (scatter-gather) table with segments
4184 * associated with the current disk command.
4185 *
1da177e4 4186 * LOCKING:
cca3974e 4187 * spin_lock_irqsave(host lock)
1da177e4
LT
4188 *
4189 */
4190static void ata_fill_sg(struct ata_queued_cmd *qc)
4191{
1da177e4 4192 struct ata_port *ap = qc->ap;
cedc9a47
JG
4193 struct scatterlist *sg;
4194 unsigned int idx;
1da177e4 4195
a4631474 4196 WARN_ON(qc->__sg == NULL);
f131883e 4197 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4198
4199 idx = 0;
cedc9a47 4200 ata_for_each_sg(sg, qc) {
1da177e4
LT
4201 u32 addr, offset;
4202 u32 sg_len, len;
4203
4204 /* determine if physical DMA addr spans 64K boundary.
4205 * Note h/w doesn't support 64-bit, so we unconditionally
4206 * truncate dma_addr_t to u32.
4207 */
4208 addr = (u32) sg_dma_address(sg);
4209 sg_len = sg_dma_len(sg);
4210
4211 while (sg_len) {
4212 offset = addr & 0xffff;
4213 len = sg_len;
4214 if ((offset + sg_len) > 0x10000)
4215 len = 0x10000 - offset;
4216
4217 ap->prd[idx].addr = cpu_to_le32(addr);
4218 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4219 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4220
4221 idx++;
4222 sg_len -= len;
4223 addr += len;
4224 }
4225 }
4226
4227 if (idx)
4228 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4229}
b9a4197e 4230
d26fc955
AC
4231/**
4232 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4233 * @qc: Metadata associated with taskfile to be transferred
4234 *
4235 * Fill PCI IDE PRD (scatter-gather) table with segments
4236 * associated with the current disk command. Perform the fill
4237 * so that we avoid writing any length 64K records for
4238 * controllers that don't follow the spec.
4239 *
4240 * LOCKING:
4241 * spin_lock_irqsave(host lock)
4242 *
4243 */
4244static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4245{
4246 struct ata_port *ap = qc->ap;
4247 struct scatterlist *sg;
4248 unsigned int idx;
4249
4250 WARN_ON(qc->__sg == NULL);
4251 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4252
4253 idx = 0;
4254 ata_for_each_sg(sg, qc) {
4255 u32 addr, offset;
4256 u32 sg_len, len, blen;
4257
4258 /* determine if physical DMA addr spans 64K boundary.
4259 * Note h/w doesn't support 64-bit, so we unconditionally
4260 * truncate dma_addr_t to u32.
4261 */
4262 addr = (u32) sg_dma_address(sg);
4263 sg_len = sg_dma_len(sg);
4264
4265 while (sg_len) {
4266 offset = addr & 0xffff;
4267 len = sg_len;
4268 if ((offset + sg_len) > 0x10000)
4269 len = 0x10000 - offset;
4270
4271 blen = len & 0xffff;
4272 ap->prd[idx].addr = cpu_to_le32(addr);
4273 if (blen == 0) {
4274 /* Some PATA chipsets like the CS5530 can't
4275 cope with 0x0000 meaning 64K as the spec says */
4276 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4277 blen = 0x8000;
4278 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4279 }
4280 ap->prd[idx].flags_len = cpu_to_le32(blen);
4281 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4282
4283 idx++;
4284 sg_len -= len;
4285 addr += len;
4286 }
4287 }
4288
4289 if (idx)
4290 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4291}
4292
1da177e4
LT
4293/**
4294 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4295 * @qc: Metadata associated with taskfile to check
4296 *
780a87f7
JG
4297 * Allow low-level driver to filter ATA PACKET commands, returning
4298 * a status indicating whether or not it is OK to use DMA for the
4299 * supplied PACKET command.
4300 *
1da177e4 4301 * LOCKING:
cca3974e 4302 * spin_lock_irqsave(host lock)
0cba632b 4303 *
1da177e4
LT
4304 * RETURNS: 0 when ATAPI DMA can be used
4305 * nonzero otherwise
4306 */
4307int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4308{
4309 struct ata_port *ap = qc->ap;
b9a4197e
TH
4310
4311 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4312 * few ATAPI devices choke on such DMA requests.
4313 */
4314 if (unlikely(qc->nbytes & 15))
4315 return 1;
6f23a31d 4316
1da177e4 4317 if (ap->ops->check_atapi_dma)
b9a4197e 4318 return ap->ops->check_atapi_dma(qc);
1da177e4 4319
b9a4197e 4320 return 0;
1da177e4 4321}
b9a4197e 4322
1da177e4
LT
4323/**
4324 * ata_qc_prep - Prepare taskfile for submission
4325 * @qc: Metadata associated with taskfile to be prepared
4326 *
780a87f7
JG
4327 * Prepare ATA taskfile for submission.
4328 *
1da177e4 4329 * LOCKING:
cca3974e 4330 * spin_lock_irqsave(host lock)
1da177e4
LT
4331 */
4332void ata_qc_prep(struct ata_queued_cmd *qc)
4333{
4334 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4335 return;
4336
4337 ata_fill_sg(qc);
4338}
4339
d26fc955
AC
4340/**
4341 * ata_dumb_qc_prep - Prepare taskfile for submission
4342 * @qc: Metadata associated with taskfile to be prepared
4343 *
4344 * Prepare ATA taskfile for submission.
4345 *
4346 * LOCKING:
4347 * spin_lock_irqsave(host lock)
4348 */
4349void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4350{
4351 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4352 return;
4353
4354 ata_fill_sg_dumb(qc);
4355}
4356
/* no-op qc_prep for controllers that need no PRD setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4358
0cba632b
JG
4359/**
4360 * ata_sg_init_one - Associate command with memory buffer
4361 * @qc: Command to be associated
4362 * @buf: Memory buffer
4363 * @buflen: Length of memory buffer, in bytes.
4364 *
4365 * Initialize the data-related elements of queued_cmd @qc
4366 * to point to a single memory buffer, @buf of byte length @buflen.
4367 *
4368 * LOCKING:
cca3974e 4369 * spin_lock_irqsave(host lock)
0cba632b
JG
4370 */
4371
1da177e4
LT
4372void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4373{
1da177e4
LT
4374 qc->flags |= ATA_QCFLAG_SINGLE;
4375
cedc9a47 4376 qc->__sg = &qc->sgent;
1da177e4 4377 qc->n_elem = 1;
cedc9a47 4378 qc->orig_n_elem = 1;
1da177e4 4379 qc->buf_virt = buf;
233277ca 4380 qc->nbytes = buflen;
1da177e4 4381
61c0596c 4382 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4383}
4384
0cba632b
JG
4385/**
4386 * ata_sg_init - Associate command with scatter-gather table.
4387 * @qc: Command to be associated
4388 * @sg: Scatter-gather table.
4389 * @n_elem: Number of elements in s/g table.
4390 *
4391 * Initialize the data-related elements of queued_cmd @qc
4392 * to point to a scatter-gather table @sg, containing @n_elem
4393 * elements.
4394 *
4395 * LOCKING:
cca3974e 4396 * spin_lock_irqsave(host lock)
0cba632b
JG
4397 */
4398
1da177e4
LT
4399void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4400 unsigned int n_elem)
4401{
4402 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4403 qc->__sg = sg;
1da177e4 4404 qc->n_elem = n_elem;
cedc9a47 4405 qc->orig_n_elem = n_elem;
1da177e4
LT
4406}
4407
4408/**
0cba632b
JG
4409 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4410 * @qc: Command with memory buffer to be mapped.
4411 *
4412 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4413 *
4414 * LOCKING:
cca3974e 4415 * spin_lock_irqsave(host lock)
1da177e4
LT
4416 *
4417 * RETURNS:
0cba632b 4418 * Zero on success, negative on error.
1da177e4
LT
4419 */
4420
4421static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4422{
4423 struct ata_port *ap = qc->ap;
4424 int dir = qc->dma_dir;
cedc9a47 4425 struct scatterlist *sg = qc->__sg;
1da177e4 4426 dma_addr_t dma_address;
2e242fa9 4427 int trim_sg = 0;
1da177e4 4428
cedc9a47
JG
4429 /* we must lengthen transfers to end on a 32-bit boundary */
4430 qc->pad_len = sg->length & 3;
4431 if (qc->pad_len) {
4432 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4433 struct scatterlist *psg = &qc->pad_sgent;
4434
a4631474 4435 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4436
4437 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4438
4439 if (qc->tf.flags & ATA_TFLAG_WRITE)
4440 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4441 qc->pad_len);
4442
4443 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4444 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4445 /* trim sg */
4446 sg->length -= qc->pad_len;
2e242fa9
TH
4447 if (sg->length == 0)
4448 trim_sg = 1;
cedc9a47
JG
4449
4450 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4451 sg->length, qc->pad_len);
4452 }
4453
2e242fa9
TH
4454 if (trim_sg) {
4455 qc->n_elem--;
e1410f2d
JG
4456 goto skip_map;
4457 }
4458
2f1f610b 4459 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4460 sg->length, dir);
537a95d9
TH
4461 if (dma_mapping_error(dma_address)) {
4462 /* restore sg */
4463 sg->length += qc->pad_len;
1da177e4 4464 return -1;
537a95d9 4465 }
1da177e4
LT
4466
4467 sg_dma_address(sg) = dma_address;
32529e01 4468 sg_dma_len(sg) = sg->length;
1da177e4 4469
2e242fa9 4470skip_map:
1da177e4
LT
4471 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4472 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4473
4474 return 0;
4475}
4476
4477/**
0cba632b
JG
4478 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4479 * @qc: Command with scatter-gather table to be mapped.
4480 *
4481 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4482 *
4483 * LOCKING:
cca3974e 4484 * spin_lock_irqsave(host lock)
1da177e4
LT
4485 *
4486 * RETURNS:
0cba632b 4487 * Zero on success, negative on error.
1da177e4
LT
4488 *
4489 */
4490
4491static int ata_sg_setup(struct ata_queued_cmd *qc)
4492{
4493 struct ata_port *ap = qc->ap;
cedc9a47
JG
4494 struct scatterlist *sg = qc->__sg;
4495 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4496 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4497
44877b4e 4498 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4499 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4500
cedc9a47
JG
4501 /* we must lengthen transfers to end on a 32-bit boundary */
4502 qc->pad_len = lsg->length & 3;
4503 if (qc->pad_len) {
4504 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4505 struct scatterlist *psg = &qc->pad_sgent;
4506 unsigned int offset;
4507
a4631474 4508 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4509
4510 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4511
4512 /*
4513 * psg->page/offset are used to copy to-be-written
4514 * data in this function or read data in ata_sg_clean.
4515 */
4516 offset = lsg->offset + lsg->length - qc->pad_len;
4517 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4518 psg->offset = offset_in_page(offset);
4519
4520 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4521 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4522 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4523 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4524 }
4525
4526 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4527 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4528 /* trim last sg */
4529 lsg->length -= qc->pad_len;
e1410f2d
JG
4530 if (lsg->length == 0)
4531 trim_sg = 1;
cedc9a47
JG
4532
4533 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4534 qc->n_elem - 1, lsg->length, qc->pad_len);
4535 }
4536
e1410f2d
JG
4537 pre_n_elem = qc->n_elem;
4538 if (trim_sg && pre_n_elem)
4539 pre_n_elem--;
4540
4541 if (!pre_n_elem) {
4542 n_elem = 0;
4543 goto skip_map;
4544 }
4545
1da177e4 4546 dir = qc->dma_dir;
2f1f610b 4547 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4548 if (n_elem < 1) {
4549 /* restore last sg */
4550 lsg->length += qc->pad_len;
1da177e4 4551 return -1;
537a95d9 4552 }
1da177e4
LT
4553
4554 DPRINTK("%d sg elements mapped\n", n_elem);
4555
e1410f2d 4556skip_map:
1da177e4
LT
4557 qc->n_elem = n_elem;
4558
4559 return 0;
4560}
4561
0baab86b 4562/**
c893a3ae 4563 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4564 * @buf: Buffer to swap
4565 * @buf_words: Number of 16-bit words in buffer.
4566 *
4567 * Swap halves of 16-bit words if needed to convert from
4568 * little-endian byte order to native cpu byte order, or
4569 * vice-versa.
4570 *
4571 * LOCKING:
6f0ef4fa 4572 * Inherited from caller.
0baab86b 4573 */
1da177e4
LT
4574void swap_buf_le16(u16 *buf, unsigned int buf_words)
4575{
4576#ifdef __BIG_ENDIAN
4577 unsigned int i;
4578
4579 for (i = 0; i < buf_words; i++)
4580 buf[i] = le16_to_cpu(buf[i]);
4581#endif /* __BIG_ENDIAN */
4582}
4583
6ae4cfb5 4584/**
0d5ff566 4585 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4586 * @adev: device to target
6ae4cfb5
AL
4587 * @buf: data buffer
4588 * @buflen: buffer length
344babaa 4589 * @write_data: read/write
6ae4cfb5
AL
4590 *
4591 * Transfer data from/to the device data register by PIO.
4592 *
4593 * LOCKING:
4594 * Inherited from caller.
6ae4cfb5 4595 */
0d5ff566
TH
4596void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4597 unsigned int buflen, int write_data)
1da177e4 4598{
9af5c9c9 4599 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4600 unsigned int words = buflen >> 1;
1da177e4 4601
6ae4cfb5 4602 /* Transfer multiple of 2 bytes */
1da177e4 4603 if (write_data)
0d5ff566 4604 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4605 else
0d5ff566 4606 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4607
4608 /* Transfer trailing 1 byte, if any. */
4609 if (unlikely(buflen & 0x01)) {
4610 u16 align_buf[1] = { 0 };
4611 unsigned char *trailing_buf = buf + buflen - 1;
4612
4613 if (write_data) {
4614 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4615 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4616 } else {
0d5ff566 4617 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4618 memcpy(trailing_buf, align_buf, 1);
4619 }
4620 }
1da177e4
LT
4621}
4622
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	/* mask local interrupts around the PIO loop */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4644
4645
6ae4cfb5 4646/**
5a5dbd18 4647 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4648 * @qc: Command on going
4649 *
5a5dbd18 4650 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4651 *
4652 * LOCKING:
4653 * Inherited from caller.
4654 */
4655
1da177e4
LT
4656static void ata_pio_sector(struct ata_queued_cmd *qc)
4657{
4658 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4659 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4660 struct ata_port *ap = qc->ap;
4661 struct page *page;
4662 unsigned int offset;
4663 unsigned char *buf;
4664
5a5dbd18 4665 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4666 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4667
4668 page = sg[qc->cursg].page;
726f0785 4669 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4670
4671 /* get the current page and offset */
4672 page = nth_page(page, (offset >> PAGE_SHIFT));
4673 offset %= PAGE_SIZE;
4674
1da177e4
LT
4675 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4676
91b8b313
AL
4677 if (PageHighMem(page)) {
4678 unsigned long flags;
4679
a6b2c5d4 4680 /* FIXME: use a bounce buffer */
91b8b313
AL
4681 local_irq_save(flags);
4682 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4683
91b8b313 4684 /* do the actual data transfer */
5a5dbd18 4685 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4686
91b8b313
AL
4687 kunmap_atomic(buf, KM_IRQ0);
4688 local_irq_restore(flags);
4689 } else {
4690 buf = page_address(page);
5a5dbd18 4691 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4692 }
1da177e4 4693
5a5dbd18
ML
4694 qc->curbytes += qc->sect_size;
4695 qc->cursg_ofs += qc->sect_size;
1da177e4 4696
726f0785 4697 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4698 qc->cursg++;
4699 qc->cursg_ofs = 0;
4700 }
1da177e4 4701}
1da177e4 4702
07f6f7d0 4703/**
5a5dbd18 4704 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4705 * @qc: Command on going
4706 *
5a5dbd18 4707 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4708 * ATA device for the DRQ request.
4709 *
4710 * LOCKING:
4711 * Inherited from caller.
4712 */
1da177e4 4713
07f6f7d0
AL
4714static void ata_pio_sectors(struct ata_queued_cmd *qc)
4715{
4716 if (is_multi_taskfile(&qc->tf)) {
4717 /* READ/WRITE MULTIPLE */
4718 unsigned int nsect;
4719
587005de 4720 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4721
5a5dbd18 4722 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4723 qc->dev->multi_count);
07f6f7d0
AL
4724 while (nsect--)
4725 ata_pio_sector(qc);
4726 } else
4727 ata_pio_sector(qc);
4cc980b3
AL
4728
4729 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4730}
4731
c71c1857
AL
4732/**
4733 * atapi_send_cdb - Write CDB bytes to hardware
4734 * @ap: Port to which ATAPI device is attached.
4735 * @qc: Taskfile currently active
4736 *
4737 * When device has indicated its readiness to accept
4738 * a CDB, this function is called. Send the CDB.
4739 *
4740 * LOCKING:
4741 * caller.
4742 */
4743
4744static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4745{
4746 /* send SCSI cdb */
4747 DPRINTK("send cdb\n");
db024d53 4748 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4749
a6b2c5d4 4750 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4751 ata_altstatus(ap); /* flush */
4752
4753 switch (qc->tf.protocol) {
4754 case ATA_PROT_ATAPI:
4755 ap->hsm_task_state = HSM_ST;
4756 break;
4757 case ATA_PROT_ATAPI_NODATA:
4758 ap->hsm_task_state = HSM_ST_LAST;
4759 break;
4760 case ATA_PROT_ATAPI_DMA:
4761 ap->hsm_task_state = HSM_ST_LAST;
4762 /* initiate bmdma */
4763 ap->ops->bmdma_start(qc);
4764 break;
4765 }
1da177e4
LT
4766}
4767
6ae4cfb5
AL
4768/**
4769 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4770 * @qc: Command on going
4771 * @bytes: number of bytes
4772 *
4773 * Transfer Transfer data from/to the ATAPI device.
4774 *
4775 * LOCKING:
4776 * Inherited from caller.
4777 *
4778 */
4779
1da177e4
LT
4780static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4781{
4782 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4783 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4784 struct ata_port *ap = qc->ap;
4785 struct page *page;
4786 unsigned char *buf;
4787 unsigned int offset, count;
4788
563a6e1f 4789 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4790 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4791
4792next_sg:
563a6e1f 4793 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4794 /*
563a6e1f
AL
4795 * The end of qc->sg is reached and the device expects
4796 * more data to transfer. In order not to overrun qc->sg
4797 * and fulfill length specified in the byte count register,
4798 * - for read case, discard trailing data from the device
4799 * - for write case, padding zero data to the device
4800 */
4801 u16 pad_buf[1] = { 0 };
4802 unsigned int words = bytes >> 1;
4803 unsigned int i;
4804
4805 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4806 ata_dev_printk(qc->dev, KERN_WARNING,
4807 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4808
4809 for (i = 0; i < words; i++)
a6b2c5d4 4810 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4811
14be71f4 4812 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4813 return;
4814 }
4815
cedc9a47 4816 sg = &qc->__sg[qc->cursg];
1da177e4 4817
1da177e4
LT
4818 page = sg->page;
4819 offset = sg->offset + qc->cursg_ofs;
4820
4821 /* get the current page and offset */
4822 page = nth_page(page, (offset >> PAGE_SHIFT));
4823 offset %= PAGE_SIZE;
4824
6952df03 4825 /* don't overrun current sg */
32529e01 4826 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4827
4828 /* don't cross page boundaries */
4829 count = min(count, (unsigned int)PAGE_SIZE - offset);
4830
7282aa4b
AL
4831 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4832
91b8b313
AL
4833 if (PageHighMem(page)) {
4834 unsigned long flags;
4835
a6b2c5d4 4836 /* FIXME: use bounce buffer */
91b8b313
AL
4837 local_irq_save(flags);
4838 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4839
91b8b313 4840 /* do the actual data transfer */
a6b2c5d4 4841 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4842
91b8b313
AL
4843 kunmap_atomic(buf, KM_IRQ0);
4844 local_irq_restore(flags);
4845 } else {
4846 buf = page_address(page);
a6b2c5d4 4847 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4848 }
1da177e4
LT
4849
4850 bytes -= count;
4851 qc->curbytes += count;
4852 qc->cursg_ofs += count;
4853
32529e01 4854 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4855 qc->cursg++;
4856 qc->cursg_ofs = 0;
4857 }
4858
563a6e1f 4859 if (bytes)
1da177e4 4860 goto next_sg;
1da177e4
LT
4861}
4862
6ae4cfb5
AL
4863/**
4864 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4865 * @qc: Command on going
4866 *
4867 * Transfer Transfer data from/to the ATAPI device.
4868 *
4869 * LOCKING:
4870 * Inherited from caller.
6ae4cfb5
AL
4871 */
4872
1da177e4
LT
4873static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4874{
4875 struct ata_port *ap = qc->ap;
4876 struct ata_device *dev = qc->dev;
4877 unsigned int ireason, bc_lo, bc_hi, bytes;
4878 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4879
eec4c3f3
AL
4880 /* Abuse qc->result_tf for temp storage of intermediate TF
4881 * here to save some kernel stack usage.
4882 * For normal completion, qc->result_tf is not relevant. For
4883 * error, qc->result_tf is later overwritten by ata_qc_complete().
4884 * So, the correctness of qc->result_tf is not affected.
4885 */
4886 ap->ops->tf_read(ap, &qc->result_tf);
4887 ireason = qc->result_tf.nsect;
4888 bc_lo = qc->result_tf.lbam;
4889 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4890 bytes = (bc_hi << 8) | bc_lo;
4891
4892 /* shall be cleared to zero, indicating xfer of data */
4893 if (ireason & (1 << 0))
4894 goto err_out;
4895
4896 /* make sure transfer direction matches expected */
4897 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4898 if (do_write != i_write)
4899 goto err_out;
4900
44877b4e 4901 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4902
1da177e4 4903 __atapi_pio_bytes(qc, bytes);
4cc980b3 4904 ata_altstatus(ap); /* flush */
1da177e4
LT
4905
4906 return;
4907
4908err_out:
f15a1daf 4909 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4910 qc->err_mask |= AC_ERR_HSM;
14be71f4 4911 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4912}
4913
4914/**
c234fb00
AL
4915 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4916 * @ap: the target ata_port
4917 * @qc: qc on going
1da177e4 4918 *
c234fb00
AL
4919 * RETURNS:
4920 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4921 */
c234fb00
AL
4922
4923static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4924{
c234fb00
AL
4925 if (qc->tf.flags & ATA_TFLAG_POLLING)
4926 return 1;
1da177e4 4927
c234fb00
AL
4928 if (ap->hsm_task_state == HSM_ST_FIRST) {
4929 if (qc->tf.protocol == ATA_PROT_PIO &&
4930 (qc->tf.flags & ATA_TFLAG_WRITE))
4931 return 1;
1da177e4 4932
c234fb00
AL
4933 if (is_atapi_taskfile(&qc->tf) &&
4934 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4935 return 1;
fe79e683
AL
4936 }
4937
c234fb00
AL
4938 return 0;
4939}
1da177e4 4940
c17ea20d
TH
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new EH: an HSM error freezes the port instead of
		 * completing the qc directly.
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.  Re-look up the qc by tag; NULL means
			 * EH already owns it and we must not touch it.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable interrupts disabled for polling */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* interrupt context: host lock already held */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: always complete; no freeze support */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
4990
bb5cb290
AL
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					/* drain the junk block on reads */
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5224
65f27f38 5225static void ata_pio_task(struct work_struct *work)
8061f5f0 5226{
65f27f38
DH
5227 struct ata_port *ap =
5228 container_of(work, struct ata_port, port_task.work);
5229 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5230 u8 status;
a1af3734 5231 int poll_next;
8061f5f0 5232
7fb6ec28 5233fsm_start:
a1af3734 5234 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5235
a1af3734
AL
5236 /*
5237 * This is purely heuristic. This is a fast path.
5238 * Sometimes when we enter, BSY will be cleared in
5239 * a chk-status or two. If not, the drive is probably seeking
5240 * or something. Snooze for a couple msecs, then
5241 * chk-status again. If still busy, queue delayed work.
5242 */
5243 status = ata_busy_wait(ap, ATA_BUSY, 5);
5244 if (status & ATA_BUSY) {
5245 msleep(2);
5246 status = ata_busy_wait(ap, ATA_BUSY, 10);
5247 if (status & ATA_BUSY) {
31ce6dae 5248 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5249 return;
5250 }
8061f5f0
TH
5251 }
5252
a1af3734
AL
5253 /* move the HSM */
5254 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5255
a1af3734
AL
5256 /* another command or interrupt handler
5257 * may be running at this point.
5258 */
5259 if (poll_next)
7fb6ec28 5260 goto fsm_start;
8061f5f0
TH
5261}
5262
1da177e4
LT
5263/**
5264 * ata_qc_new - Request an available ATA command, for queueing
5265 * @ap: Port associated with device @dev
5266 * @dev: Device from whom we request an available command structure
5267 *
5268 * LOCKING:
0cba632b 5269 * None.
1da177e4
LT
5270 */
5271
5272static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5273{
5274 struct ata_queued_cmd *qc = NULL;
5275 unsigned int i;
5276
e3180499 5277 /* no command while frozen */
b51e9e5d 5278 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5279 return NULL;
5280
2ab7db1f
TH
5281 /* the last tag is reserved for internal command. */
5282 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5283 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5284 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5285 break;
5286 }
5287
5288 if (qc)
5289 qc->tag = i;
5290
5291 return qc;
5292}
5293
5294/**
5295 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5296 * @dev: Device from whom we request an available command structure
5297 *
5298 * LOCKING:
0cba632b 5299 * None.
1da177e4
LT
5300 */
5301
3373efd8 5302struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5303{
9af5c9c9 5304 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5305 struct ata_queued_cmd *qc;
5306
5307 qc = ata_qc_new(ap);
5308 if (qc) {
1da177e4
LT
5309 qc->scsicmd = NULL;
5310 qc->ap = ap;
5311 qc->dev = dev;
1da177e4 5312
2c13b7ce 5313 ata_qc_reinit(qc);
1da177e4
LT
5314 }
5315
5316 return qc;
5317}
5318
1da177e4
LT
5319/**
5320 * ata_qc_free - free unused ata_queued_cmd
5321 * @qc: Command to complete
5322 *
5323 * Designed to free unused ata_queued_cmd object
5324 * in case something prevents using it.
5325 *
5326 * LOCKING:
cca3974e 5327 * spin_lock_irqsave(host lock)
1da177e4
LT
5328 */
5329void ata_qc_free(struct ata_queued_cmd *qc)
5330{
4ba946e9
TH
5331 struct ata_port *ap = qc->ap;
5332 unsigned int tag;
5333
a4631474 5334 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5335
4ba946e9
TH
5336 qc->flags = 0;
5337 tag = qc->tag;
5338 if (likely(ata_tag_valid(tag))) {
4ba946e9 5339 qc->tag = ATA_TAG_POISON;
6cec4a39 5340 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5341 }
1da177e4
LT
5342}
5343
/* Tear down @qc: unmap DMA, clear active-tag bookkeeping, mark the qc
 * inactive and invoke its completion callback.  Callers hold the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		link->sactive &= ~(1 << qc->tag);
	else
		link->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5371
39599a53
TH
5372static void fill_result_tf(struct ata_queued_cmd *qc)
5373{
5374 struct ata_port *ap = qc->ap;
5375
39599a53 5376 qc->result_tf.flags = qc->tf.flags;
4742d54f 5377 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5378}
5379
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* completing a qc while frozen is a bug */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH: a qc already handed to EH must not complete here */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5439
dedaf2b0
TH
5440/**
5441 * ata_qc_complete_multiple - Complete multiple qcs successfully
5442 * @ap: port in question
5443 * @qc_active: new qc_active mask
5444 * @finish_qc: LLDD callback invoked before completing a qc
5445 *
5446 * Complete in-flight commands. This functions is meant to be
5447 * called from low-level driver's interrupt routine to complete
5448 * requests normally. ap->qc_active and @qc_active is compared
5449 * and commands are completed accordingly.
5450 *
5451 * LOCKING:
cca3974e 5452 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5453 *
5454 * RETURNS:
5455 * Number of completed commands on success, -errno otherwise.
5456 */
5457int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5458 void (*finish_qc)(struct ata_queued_cmd *))
5459{
5460 int nr_done = 0;
5461 u32 done_mask;
5462 int i;
5463
5464 done_mask = ap->qc_active ^ qc_active;
5465
5466 if (unlikely(done_mask & qc_active)) {
5467 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5468 "(%08x->%08x)\n", ap->qc_active, qc_active);
5469 return -EINVAL;
5470 }
5471
5472 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5473 struct ata_queued_cmd *qc;
5474
5475 if (!(done_mask & (1 << i)))
5476 continue;
5477
5478 if ((qc = ata_qc_from_tag(ap, i))) {
5479 if (finish_qc)
5480 finish_qc(qc);
5481 ata_qc_complete(qc);
5482 nr_done++;
5483 }
5484 }
5485
5486 return nr_done;
5487}
5488
1da177e4
LT
5489static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5490{
5491 struct ata_port *ap = qc->ap;
5492
5493 switch (qc->tf.protocol) {
3dc1d881 5494 case ATA_PROT_NCQ:
1da177e4
LT
5495 case ATA_PROT_DMA:
5496 case ATA_PROT_ATAPI_DMA:
5497 return 1;
5498
5499 case ATA_PROT_ATAPI:
5500 case ATA_PROT_PIO:
1da177e4
LT
5501 if (ap->flags & ATA_FLAG_PIO_DMA)
5502 return 1;
5503
5504 /* fall through */
5505
5506 default:
5507 return 0;
5508 }
5509
5510 /* never reached */
5511}
5512
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On any failure the qc is completed with an error mask set,
 *	so callers need not check a return value.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* record this qc as active on its link before issuing */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G mapping failed -- fail the qc without issuing it */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5572
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5704
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5806
5807/**
5808 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5809 * @irq: irq line (unused)
cca3974e 5810 * @dev_instance: pointer to our ata_host information structure
1da177e4 5811 *
0cba632b
JG
5812 * Default interrupt handler for PCI IDE devices. Calls
5813 * ata_host_intr() for each port that is not disabled.
5814 *
1da177e4 5815 * LOCKING:
cca3974e 5816 * Obtains host lock during operation.
1da177e4
LT
5817 *
5818 * RETURNS:
0cba632b 5819 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5820 */
5821
7d12e780 5822irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5823{
cca3974e 5824 struct ata_host *host = dev_instance;
1da177e4
LT
5825 unsigned int i;
5826 unsigned int handled = 0;
5827 unsigned long flags;
5828
5829 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5830 spin_lock_irqsave(&host->lock, flags);
1da177e4 5831
cca3974e 5832 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5833 struct ata_port *ap;
5834
cca3974e 5835 ap = host->ports[i];
c1389503 5836 if (ap &&
029f5468 5837 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5838 struct ata_queued_cmd *qc;
5839
9af5c9c9 5840 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5841 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5842 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5843 handled |= ata_host_intr(ap, qc);
5844 }
5845 }
5846
cca3974e 5847 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5848
5849 return IRQ_RETVAL(handled);
5850}
5851
34bf2170
TH
5852/**
5853 * sata_scr_valid - test whether SCRs are accessible
936fd732 5854 * @link: ATA link to test SCR accessibility for
34bf2170 5855 *
936fd732 5856 * Test whether SCRs are accessible for @link.
34bf2170
TH
5857 *
5858 * LOCKING:
5859 * None.
5860 *
5861 * RETURNS:
5862 * 1 if SCRs are accessible, 0 otherwise.
5863 */
936fd732 5864int sata_scr_valid(struct ata_link *link)
34bf2170 5865{
936fd732
TH
5866 struct ata_port *ap = link->ap;
5867
a16abc0b 5868 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5869}
5870
5871/**
5872 * sata_scr_read - read SCR register of the specified port
936fd732 5873 * @link: ATA link to read SCR for
34bf2170
TH
5874 * @reg: SCR to read
5875 * @val: Place to store read value
5876 *
936fd732 5877 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5878 * guaranteed to succeed if the cable type of the port is SATA
5879 * and the port implements ->scr_read.
5880 *
5881 * LOCKING:
5882 * None.
5883 *
5884 * RETURNS:
5885 * 0 on success, negative errno on failure.
5886 */
936fd732 5887int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5888{
936fd732
TH
5889 struct ata_port *ap = link->ap;
5890
5891 if (sata_scr_valid(link))
da3dbb17 5892 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5893 return -EOPNOTSUPP;
5894}
5895
5896/**
5897 * sata_scr_write - write SCR register of the specified port
936fd732 5898 * @link: ATA link to write SCR for
34bf2170
TH
5899 * @reg: SCR to write
5900 * @val: value to write
5901 *
936fd732 5902 * Write @val to SCR register @reg of @link. This function is
34bf2170
TH
5903 * guaranteed to succeed if the cable type of the port is SATA
5904 * and the port implements ->scr_read.
5905 *
5906 * LOCKING:
5907 * None.
5908 *
5909 * RETURNS:
5910 * 0 on success, negative errno on failure.
5911 */
936fd732 5912int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5913{
936fd732
TH
5914 struct ata_port *ap = link->ap;
5915
5916 if (sata_scr_valid(link))
da3dbb17 5917 return ap->ops->scr_write(ap, reg, val);
34bf2170
TH
5918 return -EOPNOTSUPP;
5919}
5920
5921/**
5922 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5923 * @link: ATA link to write SCR for
34bf2170
TH
5924 * @reg: SCR to write
5925 * @val: value to write
5926 *
5927 * This function is identical to sata_scr_write() except that this
5928 * function performs flush after writing to the register.
5929 *
5930 * LOCKING:
5931 * None.
5932 *
5933 * RETURNS:
5934 * 0 on success, negative errno on failure.
5935 */
936fd732 5936int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5937{
936fd732 5938 struct ata_port *ap = link->ap;
da3dbb17
TH
5939 int rc;
5940
936fd732 5941 if (sata_scr_valid(link)) {
da3dbb17
TH
5942 rc = ap->ops->scr_write(ap, reg, val);
5943 if (rc == 0)
5944 rc = ap->ops->scr_read(ap, reg, &val);
5945 return rc;
34bf2170
TH
5946 }
5947 return -EOPNOTSUPP;
5948}
5949
5950/**
936fd732
TH
5951 * ata_link_online - test whether the given link is online
5952 * @link: ATA link to test
34bf2170 5953 *
936fd732
TH
5954 * Test whether @link is online. Note that this function returns
5955 * 0 if online status of @link cannot be obtained, so
5956 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5957 *
5958 * LOCKING:
5959 * None.
5960 *
5961 * RETURNS:
5962 * 1 if the port online status is available and online.
5963 */
936fd732 5964int ata_link_online(struct ata_link *link)
34bf2170
TH
5965{
5966 u32 sstatus;
5967
936fd732
TH
5968 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5969 (sstatus & 0xf) == 0x3)
34bf2170
TH
5970 return 1;
5971 return 0;
5972}
5973
5974/**
936fd732
TH
5975 * ata_link_offline - test whether the given link is offline
5976 * @link: ATA link to test
34bf2170 5977 *
936fd732
TH
5978 * Test whether @link is offline. Note that this function
5979 * returns 0 if offline status of @link cannot be obtained, so
5980 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5981 *
5982 * LOCKING:
5983 * None.
5984 *
5985 * RETURNS:
5986 * 1 if the port offline status is available and offline.
5987 */
936fd732 5988int ata_link_offline(struct ata_link *link)
34bf2170
TH
5989{
5990 u32 sstatus;
5991
936fd732
TH
5992 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5993 (sstatus & 0xf) != 0x3)
34bf2170
TH
5994 return 1;
5995 return 0;
5996}
0baab86b 5997
/* Issue FLUSH CACHE (EXT) to @dev if the device needs/supports it.
 * Returns 0 on success (or when no flush is needed), -EIO on failure.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	/* device doesn't support or need cache flushing */
	if (!ata_try_flush_cache(dev))
		return 0;

	/* use the 48-bit variant when the device supports it */
	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6023
6ffa01d8 6024#ifdef CONFIG_PM
cca3974e
JG
/* Request a PM (suspend/resume) operation for every port of @host.
 * The actual work is done by EH; this function marks each port's links
 * with @action/@ehi_flags, schedules EH and, if @wait, blocks until EH
 * completes and propagates the first per-port error.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6074
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/* wait=1: suspend must be complete before we return */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
6099
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait=0: ports resume in parallel, no need to block here */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 6117#endif
500530f6 6118
c893a3ae
RD
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed allocation: freed automatically on detach */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6149
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent prefix of the struct;
	 * fields before ATA_DEVICE_CLEAR_OFFSET survive re-init
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6184
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6217
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SPD field of SControl: non-zero means a speed limit is set */
	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6248
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port stays INITIALIZING until registration completes */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* pmp 0: the port's own (host) link */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6308
f0d36efd
TH
/* devres release callback for an ata_host: stop all ports and the host
 * (only if the host was actually started), then drop SCSI host refs and
 * free the port structures.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* first pass: stop every started port before touching host state */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* second pass: release per-port resources */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6342
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 for a NULL terminator slot in host->ports[]
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6407
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out (NULL entry), keep reusing the last pi */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops become the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6457
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: already-started hosts are a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6511
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6532
f3187195
TH
6533/**
6534 * ata_host_register - register initialized ATA host
6535 * @host: ATA host to register
6536 * @sht: template for SCSI host
6537 *
6538 * Register initialized ATA host. @host is allocated using
6539 * ata_host_alloc() and fully initialized by LLD. This function
6540 * starts ports, registers @host with ATA and SCSI layers and
6541 * probe registered devices.
6542 *
6543 * LOCKING:
6544 * Inherited from calling layer (may sleep).
6545 *
6546 * RETURNS:
6547 * 0 on success, -errno otherwise.
6548 */
6549int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6550{
6551 int i, rc;
6552
6553 /* host must have been started */
6554 if (!(host->flags & ATA_HOST_STARTED)) {
6555 dev_printk(KERN_ERR, host->dev,
6556 "BUG: trying to register unstarted host\n");
6557 WARN_ON(1);
6558 return -EINVAL;
6559 }
6560
6561 /* Blow away unused ports. This happens when LLD can't
6562 * determine the exact number of ports to allocate at
6563 * allocation time.
6564 */
6565 for (i = host->n_ports; host->ports[i]; i++)
6566 kfree(host->ports[i]);
6567
6568 /* give ports names and add SCSI hosts */
6569 for (i = 0; i < host->n_ports; i++)
6570 host->ports[i]->print_id = ata_print_id++;
6571
6572 rc = ata_scsi_add_hosts(host, sht);
6573 if (rc)
6574 return rc;
6575
fafbae87
TH
6576 /* associate with ACPI nodes */
6577 ata_acpi_associate(host);
6578
f3187195
TH
6579 /* set cable, sata_spd_limit and report */
6580 for (i = 0; i < host->n_ports; i++) {
6581 struct ata_port *ap = host->ports[i];
f3187195
TH
6582 unsigned long xfer_mask;
6583
6584 /* set SATA cable type if still unset */
6585 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6586 ap->cbl = ATA_CBL_SATA;
6587
6588 /* init sata_spd_limit to the current value */
4fb37a25 6589 sata_link_init_spd(&ap->link);
f3187195 6590
cbcdd875 6591 /* print per-port info to dmesg */
f3187195
TH
6592 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6593 ap->udma_mask);
6594
f3187195 6595 if (!ata_port_is_dummy(ap))
cbcdd875
TH
6596 ata_port_printk(ap, KERN_INFO,
6597 "%cATA max %s %s\n",
a16abc0b 6598 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 6599 ata_mode_string(xfer_mask),
cbcdd875 6600 ap->link.eh_info.desc);
f3187195
TH
6601 else
6602 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6603 }
6604
6605 /* perform each probe synchronously */
6606 DPRINTK("probe begin\n");
6607 for (i = 0; i < host->n_ports; i++) {
6608 struct ata_port *ap = host->ports[i];
6609 int rc;
6610
6611 /* probe */
6612 if (ap->ops->error_handler) {
9af5c9c9 6613 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
6614 unsigned long flags;
6615
6616 ata_port_probe(ap);
6617
6618 /* kick EH for boot probing */
6619 spin_lock_irqsave(ap->lock, flags);
6620
f58229f8
TH
6621 ehi->probe_mask =
6622 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
6623 ehi->action |= ATA_EH_SOFTRESET;
6624 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6625
f4d6d004 6626 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6627 ap->pflags |= ATA_PFLAG_LOADING;
6628 ata_port_schedule_eh(ap);
6629
6630 spin_unlock_irqrestore(ap->lock, flags);
6631
6632 /* wait for EH to finish */
6633 ata_port_wait_eh(ap);
6634 } else {
6635 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6636 rc = ata_bus_probe(ap);
6637 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6638
6639 if (rc) {
6640 /* FIXME: do something useful here?
6641 * Current libata behavior will
6642 * tear down everything when
6643 * the module is removed
6644 * or the h/w is unplugged.
6645 */
6646 }
6647 }
6648 }
6649
6650 /* probes are done, now scan each port's disk(s) */
6651 DPRINTK("host probe begin\n");
6652 for (i = 0; i < host->n_ports; i++) {
6653 struct ata_port *ap = host->ports[i];
6654
1ae46317 6655 ata_scsi_scan_host(ap, 1);
f3187195
TH
6656 }
6657
6658 return 0;
6659}
6660
f5cda257
TH
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessasry
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* record the IRQ in each port's description for dmesg */
	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6705
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without EH skip straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6760
0529c159
TH
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}
6777
1da177e4
LT
6778/**
6779 * ata_std_ports - initialize ioaddr with standard port offsets.
6780 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6781 *
6782 * Utility function which initializes data_addr, error_addr,
6783 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6784 * device_addr, status_addr, and command_addr to standard offsets
6785 * relative to cmd_addr.
6786 *
6787 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6788 */
0baab86b 6789
1da177e4
LT
6790void ata_std_ports(struct ata_ioports *ioaddr)
6791{
6792 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6793 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6794 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6795 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6796 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6797 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6798 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6799 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6800 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6801 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6802}
6803
0baab86b 6804
374b1873
JG
6805#ifdef CONFIG_PCI
6806
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
6825
6826/* move to PCI subsystem */
057ace5e 6827int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6828{
6829 unsigned long tmp = 0;
6830
6831 switch (bits->width) {
6832 case 1: {
6833 u8 tmp8 = 0;
6834 pci_read_config_byte(pdev, bits->reg, &tmp8);
6835 tmp = tmp8;
6836 break;
6837 }
6838 case 2: {
6839 u16 tmp16 = 0;
6840 pci_read_config_word(pdev, bits->reg, &tmp16);
6841 tmp = tmp16;
6842 break;
6843 }
6844 case 4: {
6845 u32 tmp32 = 0;
6846 pci_read_config_dword(pdev, bits->reg, &tmp32);
6847 tmp = tmp32;
6848 break;
6849 }
6850
6851 default:
6852 return -EINVAL;
6853 }
6854
6855 tmp &= bits->mask;
6856
6857 return (tmp == bits->val) ? 1 : 0;
6858}
9b847548 6859
6ffa01d8 6860#ifdef CONFIG_PM
/* PCI-side half of suspend: save config space, disable the device and,
 * for a real suspend (as opposed to e.g. freeze), drop to D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
6869
/* PCI-side half of resume: restore power state and config space,
 * re-enable the device (devres-managed) and restore bus mastering.
 * Returns 0 on success, -errno if the device couldn't be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6887
/* Full PCI driver suspend callback: suspend the ATA host first (via EH),
 * then put the PCI device itself to sleep.  Returns 0 or -errno.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
6901
/* Full PCI driver resume callback: wake the PCI device first, then kick
 * the ATA host resume (performed asynchronously by EH).
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
6ffa01d8
TH
6912#endif /* CONFIG_PM */
6913
1da177e4
LT
6914#endif /* CONFIG_PCI */
6915
6916
1da177e4
LT
/* Module init: scale the probe timeout by HZ (value -> jiffies) and
 * create the two libata workqueues.  Returns 0 or -ENOMEM.
 */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		/* unwind the primary workqueue on partial failure */
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
6933
/* Module exit: tear down both libata workqueues. */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6939
a4625085 6940subsys_initcall(ata_init);
1da177e4
LT
6941module_exit(ata_exit);
6942
/* next jiffy at which a rate-limited message may be emitted */
static unsigned long ratelimit_time;
/* protects ratelimit_time against concurrent callers */
static DEFINE_SPINLOCK(ata_ratelimit_lock);

/* Simple global rate limiter: returns 1 if the caller may proceed
 * (at most once per HZ/5 jiffies), 0 if it should stay quiet.
 */
int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
6963
c22daff4
TH
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	/* poll while the masked value still equals @val */
	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
7009
dd5b06c4
TH
/*
 * Dummy port_ops - stand-ins for ports with no hardware behind them.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report "device ready" so status polls terminate immediately */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* fail every issued command - a dummy port can't execute anything */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* minimal port_info wrapping the dummy ops, for LLDs with dead ports */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7045
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);