libata-core: Document some limits/assumptions about ID_ATA
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
#define DRV_VERSION	"2.21"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* next ID to hand out for port printing; presumably incremented at
 * port registration time (registration code not visible here) */
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

/* auxiliary workqueue, separate from ata_wq */
struct workqueue_struct *ata_aux_wq;

/* module parameters; all 0444 except ignore_hpa (0644, runtime-writable) */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
113
0baab86b 114
1da177e4
LT
115/**
116 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
117 * @tf: Taskfile to convert
1da177e4 118 * @pmp: Port multiplier port
9977126c
TH
119 * @is_cmd: This FIS is for command
120 * @fis: Buffer into which data will output
1da177e4
LT
121 *
122 * Converts a standard ATA taskfile to a Serial ATA
123 * FIS structure (Register - Host to Device).
124 *
125 * LOCKING:
126 * Inherited from caller.
127 */
9977126c 128void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 129{
9977126c
TH
130 fis[0] = 0x27; /* Register - Host to Device FIS */
131 fis[1] = pmp & 0xf; /* Port multiplier number*/
132 if (is_cmd)
133 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
134
1da177e4
LT
135 fis[2] = tf->command;
136 fis[3] = tf->feature;
137
138 fis[4] = tf->lbal;
139 fis[5] = tf->lbam;
140 fis[6] = tf->lbah;
141 fis[7] = tf->device;
142
143 fis[8] = tf->hob_lbal;
144 fis[9] = tf->hob_lbam;
145 fis[10] = tf->hob_lbah;
146 fis[11] = tf->hob_feature;
147
148 fis[12] = tf->nsect;
149 fis[13] = tf->hob_nsect;
150 fis[14] = 0;
151 fis[15] = tf->ctl;
152
153 fis[16] = 0;
154 fis[17] = 0;
155 fis[18] = 0;
156 fis[19] = 0;
157}
158
159/**
160 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
161 * @fis: Buffer from which data will be input
162 * @tf: Taskfile to output
163 *
e12a1be6 164 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
165 *
166 * LOCKING:
167 * Inherited from caller.
168 */
169
057ace5e 170void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
171{
172 tf->command = fis[2]; /* status */
173 tf->feature = fis[3]; /* error */
174
175 tf->lbal = fis[4];
176 tf->lbam = fis[5];
177 tf->lbah = fis[6];
178 tf->device = fis[7];
179
180 tf->hob_lbal = fis[8];
181 tf->hob_lbam = fis[9];
182 tf->hob_lbah = fis[10];
183
184 tf->nsect = fis[12];
185 tf->hob_nsect = fis[13];
186}
187
/* Opcode lookup table for ata_rwcmd_protocol().  Indexed by
 * [class + fua + lba48 + write] where class is 0 (PIO multi),
 * 8 (PIO) or 16 (DMA); fua adds 4, lba48 adds 2, write adds 1.
 * A zero entry means that combination has no valid command
 * (e.g. FUA is only defined for 48-bit writes). */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
217
218/**
8cbd6df1 219 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
220 * @tf: command to examine and configure
221 * @dev: device tf belongs to
1da177e4 222 *
2e9edbf8 223 * Examine the device configuration and tf->flags to calculate
8cbd6df1 224 * the proper read/write commands and protocol to use.
1da177e4
LT
225 *
226 * LOCKING:
227 * caller.
228 */
bd056d7e 229static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 230{
9a3dccc4 231 u8 cmd;
1da177e4 232
9a3dccc4 233 int index, fua, lba48, write;
2e9edbf8 234
9a3dccc4 235 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
236 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
237 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 238
8cbd6df1
AL
239 if (dev->flags & ATA_DFLAG_PIO) {
240 tf->protocol = ATA_PROT_PIO;
9a3dccc4 241 index = dev->multi_count ? 0 : 8;
9af5c9c9 242 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
243 /* Unable to use DMA due to host limitation */
244 tf->protocol = ATA_PROT_PIO;
0565c26d 245 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
246 } else {
247 tf->protocol = ATA_PROT_DMA;
9a3dccc4 248 index = 16;
8cbd6df1 249 }
1da177e4 250
9a3dccc4
TH
251 cmd = ata_rw_cmds[index + fua + lba48 + write];
252 if (cmd) {
253 tf->command = cmd;
254 return 0;
255 }
256 return -1;
1da177e4
LT
257}
258
35b649fe
TH
259/**
260 * ata_tf_read_block - Read block address from ATA taskfile
261 * @tf: ATA taskfile of interest
262 * @dev: ATA device @tf belongs to
263 *
264 * LOCKING:
265 * None.
266 *
267 * Read block address from @tf. This function can handle all
268 * three address formats - LBA, LBA48 and CHS. tf->protocol and
269 * flags select the address format to use.
270 *
271 * RETURNS:
272 * Block address read from @tf.
273 */
274u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
275{
276 u64 block = 0;
277
278 if (tf->flags & ATA_TFLAG_LBA) {
279 if (tf->flags & ATA_TFLAG_LBA48) {
280 block |= (u64)tf->hob_lbah << 40;
281 block |= (u64)tf->hob_lbam << 32;
282 block |= tf->hob_lbal << 24;
283 } else
284 block |= (tf->device & 0xf) << 24;
285
286 block |= tf->lbah << 16;
287 block |= tf->lbam << 8;
288 block |= tf->lbal;
289 } else {
290 u32 cyl, head, sect;
291
292 cyl = tf->lbam | (tf->lbah << 8);
293 head = tf->device & 0xf;
294 sect = tf->lbal;
295
296 block = (cyl * dev->heads + head) * dev->sectors + sect;
297 }
298
299 return block;
300}
301
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for FPDMA the tag goes in nsect bits 7:3 and the
		 * sector count goes in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;	/* LBA bit */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
424
cb95d562
TH
425/**
426 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
427 * @pio_mask: pio_mask
428 * @mwdma_mask: mwdma_mask
429 * @udma_mask: udma_mask
430 *
431 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
432 * unsigned int xfer_mask.
433 *
434 * LOCKING:
435 * None.
436 *
437 * RETURNS:
438 * Packed xfer_mask.
439 */
440static unsigned int ata_pack_xfermask(unsigned int pio_mask,
441 unsigned int mwdma_mask,
442 unsigned int udma_mask)
443{
444 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
445 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
446 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
447}
448
c0489e4e
TH
449/**
450 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
451 * @xfer_mask: xfer_mask to unpack
452 * @pio_mask: resulting pio_mask
453 * @mwdma_mask: resulting mwdma_mask
454 * @udma_mask: resulting udma_mask
455 *
456 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
457 * Any NULL distination masks will be ignored.
458 */
459static void ata_unpack_xfermask(unsigned int xfer_mask,
460 unsigned int *pio_mask,
461 unsigned int *mwdma_mask,
462 unsigned int *udma_mask)
463{
464 if (pio_mask)
465 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
466 if (mwdma_mask)
467 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
468 if (udma_mask)
469 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
470}
471
/* Maps each xfer_mask bit range (PIO/MWDMA/UDMA) to its XFER_* base
 * value; scanned linearly by the ata_xfer_* helpers below.  The
 * sentinel entry has a negative shift. */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* XFER_* value of the range's first mode */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
481
482/**
483 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
484 * @xfer_mask: xfer_mask of interest
485 *
486 * Return matching XFER_* value for @xfer_mask. Only the highest
487 * bit of @xfer_mask is considered.
488 *
489 * LOCKING:
490 * None.
491 *
492 * RETURNS:
493 * Matching XFER_* value, 0 if no match found.
494 */
495static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
496{
497 int highbit = fls(xfer_mask) - 1;
498 const struct ata_xfer_ent *ent;
499
500 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
501 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
502 return ent->base + highbit - ent->shift;
503 return 0;
504}
505
506/**
507 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
508 * @xfer_mode: XFER_* of interest
509 *
510 * Return matching xfer_mask for @xfer_mode.
511 *
512 * LOCKING:
513 * None.
514 *
515 * RETURNS:
516 * Matching xfer_mask, 0 if no match found.
517 */
518static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
519{
520 const struct ata_xfer_ent *ent;
521
522 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
523 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
524 return 1 << (ent->shift + xfer_mode - ent->base);
525 return 0;
526}
527
528/**
529 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
530 * @xfer_mode: XFER_* of interest
531 *
532 * Return matching xfer_shift for @xfer_mode.
533 *
534 * LOCKING:
535 * None.
536 *
537 * RETURNS:
538 * Matching xfer_shift, -1 if no match found.
539 */
540static int ata_xfer_mode2shift(unsigned int xfer_mode)
541{
542 const struct ata_xfer_ent *ent;
543
544 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
545 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
546 return ent->shift;
547 return -1;
548}
549
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by overall xfer_mask bit position */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
595
/* Map a SATA link speed number (1-based) to a human-readable string;
 * returns "<unknown>" for 0 or out-of-range values. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd >= 1 && spd - 1 < ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];

	return "<unknown>";
}
607
3373efd8 608void ata_dev_disable(struct ata_device *dev)
0b8efb0a 609{
09d7f9b0 610 if (ata_dev_enabled(dev)) {
9af5c9c9 611 if (ata_msg_drv(dev->link->ap))
09d7f9b0 612 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
613 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
614 ATA_DNXFER_QUIET);
0b8efb0a
TH
615 dev->class++;
616 }
617}
618
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* bounce two complementary patterns through the nsect/lbal
	 * shadow registers; the final write pair is what we expect to
	 * read back if a device is latching them */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
661
1da177e4
LT
662/**
663 * ata_dev_classify - determine device type based on ATA-spec signature
664 * @tf: ATA taskfile register set for device to be identified
665 *
666 * Determine from taskfile register contents whether a device is
667 * ATA or ATAPI, as per "Signature and persistence" section
668 * of ATA/PI spec (volume 1, sect 5.14).
669 *
670 * LOCKING:
671 * None.
672 *
673 * RETURNS:
674 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
675 * the event of failure.
676 */
677
057ace5e 678unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
679{
680 /* Apple's open source Darwin code hints that some devices only
681 * put a proper signature into the LBA mid/high registers,
682 * So, we only check those. It's sufficient for uniqueness.
683 */
684
685 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
686 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
687 DPRINTK("found ATA device by sig\n");
688 return ATA_DEV_ATA;
689 }
690
691 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
692 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
693 DPRINTK("found ATAPI device by sig\n");
694 return ATA_DEV_ATAPI;
695 }
696
697 DPRINTK("unknown device\n");
698 return ATA_DEV_UNKNOWN;
699}
700
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* after reset, feature carries the error register */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* an all-zero status for a claimed ATA device means nothing is
	 * actually there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
759
760/**
6a62a04d 761 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
762 * @id: IDENTIFY DEVICE results we will examine
763 * @s: string into which data is output
764 * @ofs: offset into identify device page
765 * @len: length of string to return. must be an even number.
766 *
767 * The strings in the IDENTIFY DEVICE page are broken up into
768 * 16-bit chunks. Run through the string, and output each
769 * 8-bit chunk linearly, regardless of platform.
770 *
771 * LOCKING:
772 * caller.
773 */
774
6a62a04d
TH
775void ata_id_string(const u16 *id, unsigned char *s,
776 unsigned int ofs, unsigned int len)
1da177e4
LT
777{
778 unsigned int c;
779
780 while (len > 0) {
781 c = id[ofs] >> 8;
782 *s = c;
783 s++;
784
785 c = id[ofs] & 0xff;
786 *s = c;
787 s++;
788
789 ofs++;
790 len -= 2;
791 }
792}
793
0e949ff3 794/**
6a62a04d 795 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
796 * @id: IDENTIFY DEVICE results we will examine
797 * @s: string into which data is output
798 * @ofs: offset into identify device page
799 * @len: length of string to return. must be an odd number.
800 *
6a62a04d 801 * This function is identical to ata_id_string except that it
0e949ff3
TH
802 * trims trailing spaces and terminates the resulting string with
803 * null. @len must be actual maximum length (even number) + 1.
804 *
805 * LOCKING:
806 * caller.
807 */
6a62a04d
TH
808void ata_id_c_string(const u16 *id, unsigned char *s,
809 unsigned int ofs, unsigned int len)
0e949ff3
TH
810{
811 unsigned char *p;
812
813 WARN_ON(!(len & 1));
814
6a62a04d 815 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
816
817 p = s + strnlen(s, len - 1);
818 while (p > s && p[-1] == ' ')
819 p--;
820 *p = '\0';
821}
0baab86b 822
1e999736
AC
823static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
824{
825 u64 sectors = 0;
826
827 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
828 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
829 sectors |= (tf->hob_lbal & 0xff) << 24;
830 sectors |= (tf->lbah & 0xff) << 16;
831 sectors |= (tf->lbam & 0xff) << 8;
832 sectors |= (tf->lbal & 0xff);
833
834 return ++sectors;
835}
836
837static u64 ata_tf_to_lba(struct ata_taskfile *tf)
838{
839 u64 sectors = 0;
840
841 sectors |= (tf->device & 0x0f) << 24;
842 sectors |= (tf->lbah & 0xff) << 16;
843 sectors |= (tf->lbam & 0xff) << 8;
844 sectors |= (tf->lbal & 0xff);
845
846 return ++sectors;
847}
848
849/**
850 * ata_read_native_max_address_ext - LBA48 native max query
851 * @dev: Device to query
852 *
853 * Perform an LBA48 size query upon the device in question. Return the
854 * actual LBA48 size or zero if the command fails.
855 */
856
857static u64 ata_read_native_max_address_ext(struct ata_device *dev)
858{
859 unsigned int err;
860 struct ata_taskfile tf;
861
862 ata_tf_init(dev, &tf);
863
864 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
865 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
866 tf.protocol |= ATA_PROT_NODATA;
867 tf.device |= 0x40;
868
869 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
870 if (err)
871 return 0;
872
873 return ata_tf_to_lba48(&tf);
874}
875
876/**
877 * ata_read_native_max_address - LBA28 native max query
878 * @dev: Device to query
879 *
880 * Performa an LBA28 size query upon the device in question. Return the
881 * actual LBA28 size or zero if the command fails.
882 */
883
884static u64 ata_read_native_max_address(struct ata_device *dev)
885{
886 unsigned int err;
887 struct ata_taskfile tf;
888
889 ata_tf_init(dev, &tf);
890
891 tf.command = ATA_CMD_READ_NATIVE_MAX;
892 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
893 tf.protocol |= ATA_PROT_NODATA;
894 tf.device |= 0x40;
895
896 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
897 if (err)
898 return 0;
899
900 return ata_tf_to_lba(&tf);
901}
902
903/**
904 * ata_set_native_max_address_ext - LBA48 native max set
905 * @dev: Device to query
6b38d1d1 906 * @new_sectors: new max sectors value to set for the device
1e999736
AC
907 *
908 * Perform an LBA48 size set max upon the device in question. Return the
909 * actual LBA48 size or zero if the command fails.
910 */
911
912static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
913{
914 unsigned int err;
915 struct ata_taskfile tf;
916
917 new_sectors--;
918
919 ata_tf_init(dev, &tf);
920
921 tf.command = ATA_CMD_SET_MAX_EXT;
922 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
923 tf.protocol |= ATA_PROT_NODATA;
924 tf.device |= 0x40;
925
926 tf.lbal = (new_sectors >> 0) & 0xff;
927 tf.lbam = (new_sectors >> 8) & 0xff;
928 tf.lbah = (new_sectors >> 16) & 0xff;
929
930 tf.hob_lbal = (new_sectors >> 24) & 0xff;
931 tf.hob_lbam = (new_sectors >> 32) & 0xff;
932 tf.hob_lbah = (new_sectors >> 40) & 0xff;
933
934 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
935 if (err)
936 return 0;
937
938 return ata_tf_to_lba48(&tf);
939}
940
941/**
942 * ata_set_native_max_address - LBA28 native max set
943 * @dev: Device to query
6b38d1d1 944 * @new_sectors: new max sectors value to set for the device
1e999736
AC
945 *
946 * Perform an LBA28 size set max upon the device in question. Return the
947 * actual LBA28 size or zero if the command fails.
948 */
949
950static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
951{
952 unsigned int err;
953 struct ata_taskfile tf;
954
955 new_sectors--;
956
957 ata_tf_init(dev, &tf);
958
959 tf.command = ATA_CMD_SET_MAX;
960 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
961 tf.protocol |= ATA_PROT_NODATA;
962
963 tf.lbal = (new_sectors >> 0) & 0xff;
964 tf.lbam = (new_sectors >> 8) & 0xff;
965 tf.lbah = (new_sectors >> 16) & 0xff;
966 tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
967
968 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
969 if (err)
970 return 0;
971
972 return ata_tf_to_lba(&tf);
973}
974
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	/* query the native max address with the widest command the
	 * device supports */
	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			/* module parameter says to unlock the full
			 * native capacity */
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
								hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
			/* set-max failed (returned 0); fall through and
			 * keep the current (BIOS-limited) size */
		}
	} else if (hpa_sectors < sectors)
		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
			       "is smaller than sectors (%lld)\n", __FUNCTION__,
			       (long long)hpa_sectors, (long long)sectors);

	return sectors;
}
1022
2940740b
TH
1023static u64 ata_id_n_sectors(const u16 *id)
1024{
1025 if (ata_id_has_lba(id)) {
1026 if (ata_id_has_lba48(id))
1027 return ata_id_u64(id, 100);
1028 else
1029 return ata_id_u32(id, 60);
1030 } else {
1031 if (ata_id_current_chs_valid(id))
1032 return ata_id_u32(id, 57);
1033 else
1034 return id[1] * id[3] * id[6];
1035 }
1036}
1037
10305f0f
A
1038/**
1039 * ata_id_to_dma_mode - Identify DMA mode from id block
1040 * @dev: device to identify
cc261267 1041 * @unknown: mode to assume if we cannot tell
10305f0f
A
1042 *
1043 * Set up the timing values for the device based upon the identify
1044 * reported values for the DMA mode. This function is used by drivers
1045 * which rely upon firmware configured modes, but wish to report the
1046 * mode correctly when possible.
1047 *
1048 * In addition we emit similarly formatted messages to the default
1049 * ata_dev_set_mode handler, in order to provide consistency of
1050 * presentation.
1051 */
1052
1053void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1054{
1055 unsigned int mask;
1056 u8 mode;
1057
1058 /* Pack the DMA modes */
1059 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1060 if (dev->id[53] & 0x04)
1061 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1062
1063 /* Select the mode in use */
1064 mode = ata_xfer_mask2mode(mask);
1065
1066 if (mode != 0) {
1067 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1068 ata_mode_string(mask));
1069 } else {
1070 /* SWDMA perhaps ? */
1071 mode = unknown;
1072 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1073 }
1074
1075 /* Configure the device reporting */
1076 dev->xfer_mode = mode;
1077 dev->xfer_shift = ata_xfer_mode2shift(mode);
1078}
1079
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: for hosts where device selection is
	 * not needed */
}
1095
0baab86b 1096
1da177e4
LT
1097/**
1098 * ata_std_dev_select - Select device 0/1 on ATA bus
1099 * @ap: ATA channel to manipulate
1100 * @device: ATA device (numbered from zero) to select
1101 *
1102 * Use the method defined in the ATA specification to
1103 * make either device 0, or device 1, active on the
0baab86b
EF
1104 * ATA channel. Works with both PIO and MMIO.
1105 *
1106 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1107 *
1108 * LOCKING:
1109 * caller.
1110 */
1111
1112void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1113{
1114 u8 tmp;
1115
1116 if (device == 0)
1117 tmp = ATA_DEVICE_OBS;
1118 else
1119 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1120
0d5ff566 1121 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1122 ata_pause(ap); /* needed; also flushes, for mmio */
1123}
1124
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* wait for the channel to go idle before switching devices */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* give a just-selected ATAPI device time to settle
		 * before we poll status again */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* word 49: capabilities, 53: field validity, 63: MWDMA,
	 * 64: advanced PIO, 75: queue depth */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* words 80-84: version/command set support */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* word 88: UDMA modes, 93: hardware reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
cb95d562
TH
1202/**
1203 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1204 * @id: IDENTIFY data to compute xfer mask from
1205 *
1206 * Compute the xfermask for this device. This is not as trivial
1207 * as it seems if we must consider early devices correctly.
1208 *
1209 * FIXME: pre IDE drive timing (do we care ?).
1210 *
1211 * LOCKING:
1212 * None.
1213 *
1214 * RETURNS:
1215 * Computed xfermask
1216 */
1217static unsigned int ata_id_xfermask(const u16 *id)
1218{
1219 unsigned int pio_mask, mwdma_mask, udma_mask;
1220
1221 /* Usual case. Word 53 indicates word 64 is valid */
1222 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1223 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1224 pio_mask <<= 3;
1225 pio_mask |= 0x7;
1226 } else {
1227 /* If word 64 isn't valid then Word 51 high byte holds
1228 * the PIO timing number for the maximum. Turn it into
1229 * a mask.
1230 */
7a0f1c8a 1231 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1232 if (mode < 5) /* Valid PIO range */
1233 pio_mask = (2 << mode) - 1;
1234 else
1235 pio_mask = 1;
cb95d562
TH
1236
1237 /* But wait.. there's more. Design your standards by
1238 * committee and you too can get a free iordy field to
1239 * process. However its the speeds not the modes that
1240 * are supported... Note drivers using the timing API
1241 * will get this right anyway
1242 */
1243 }
1244
1245 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1246
b352e57d
AC
1247 if (ata_id_is_cfa(id)) {
1248 /*
1249 * Process compact flash extended modes
1250 */
1251 int pio = id[163] & 0x7;
1252 int dma = (id[163] >> 3) & 7;
1253
1254 if (pio)
1255 pio_mask |= (1 << 5);
1256 if (pio > 1)
1257 pio_mask |= (1 << 6);
1258 if (dma)
1259 mwdma_mask |= (1 << 3);
1260 if (dma > 1)
1261 mwdma_mask |= (1 << 4);
1262 }
1263
fb21f0d0
TH
1264 udma_mask = 0;
1265 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1266 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1267
1268 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1269}
1270
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* bind the work function and its argument before queueing */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending run and waits for an executing one to
	 * finish, even if the work re-queues itself */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
7102d230 1320static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1321{
77853bf2 1322 struct completion *waiting = qc->private_data;
a2a7a662 1323
a2a7a662 1324 complete(waiting);
a2a7a662
TH
1325}
1326
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save currently-active command state and poison it so the
	 * internal command runs alone; restored before returning */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total byte count over the whole sg list */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
2432697b 1500/**
33480a0e 1501 * ata_exec_internal - execute libata internal command
2432697b
TH
1502 * @dev: Device to which the command is sent
1503 * @tf: Taskfile registers for the command and the result
1504 * @cdb: CDB for packet command
1505 * @dma_dir: Data tranfer direction of the command
1506 * @buf: Data buffer of the command
1507 * @buflen: Length of data buffer
1508 *
1509 * Wrapper around ata_exec_internal_sg() which takes simple
1510 * buffer instead of sg list.
1511 *
1512 * LOCKING:
1513 * None. Should be called with kernel context, might sleep.
1514 *
1515 * RETURNS:
1516 * Zero on success, AC_ERR_* mask on failure
1517 */
1518unsigned ata_exec_internal(struct ata_device *dev,
1519 struct ata_taskfile *tf, const u8 *cdb,
1520 int dma_dir, void *buf, unsigned int buflen)
1521{
33480a0e
TH
1522 struct scatterlist *psg = NULL, sg;
1523 unsigned int n_elem = 0;
2432697b 1524
33480a0e
TH
1525 if (dma_dir != DMA_NONE) {
1526 WARN_ON(!buf);
1527 sg_init_one(&sg, buf, buflen);
1528 psg = &sg;
1529 n_elem++;
1530 }
2432697b 1531
33480a0e 1532 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1533}
1534
977e6b9f
TH
1535/**
1536 * ata_do_simple_cmd - execute simple internal command
1537 * @dev: Device to which the command is sent
1538 * @cmd: Opcode to execute
1539 *
1540 * Execute a 'simple' command, that only consists of the opcode
1541 * 'cmd' itself, without filling any other registers
1542 *
1543 * LOCKING:
1544 * Kernel thread context (may sleep).
1545 *
1546 * RETURNS:
1547 * Zero on success, AC_ERR_* mask on failure
e58eb583 1548 */
77b08fb5 1549unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1550{
1551 struct ata_taskfile tf;
e58eb583
TH
1552
1553 ata_tf_init(dev, &tf);
1554
1555 tf.command = cmd;
1556 tf.flags |= ATA_TFLAG_DEVICE;
1557 tf.protocol = ATA_PROT_NODATA;
1558
977e6b9f 1559 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1560}
1561
1bc4ccff
AC
1562/**
1563 * ata_pio_need_iordy - check if iordy needed
1564 * @adev: ATA device
1565 *
1566 * Check if the current speed of the device requires IORDY. Used
1567 * by various controllers for chip configuration.
1568 */
a617c09f 1569
1bc4ccff
AC
1570unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1571{
432729f0
AC
1572 /* Controller doesn't support IORDY. Probably a pointless check
1573 as the caller should know this */
9af5c9c9 1574 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1575 return 0;
432729f0
AC
1576 /* PIO3 and higher it is mandatory */
1577 if (adev->pio_mode > XFER_PIO_2)
1578 return 1;
1579 /* We turn it on when possible */
1580 if (ata_id_has_iordy(adev->id))
1bc4ccff 1581 return 1;
432729f0
AC
1582 return 0;
1583}
2e9edbf8 1584
432729f0
AC
1585/**
1586 * ata_pio_mask_no_iordy - Return the non IORDY mask
1587 * @adev: ATA device
1588 *
1589 * Compute the highest mode possible if we are not using iordy. Return
1590 * -1 if no iordy mode is available.
1591 */
a617c09f 1592
432729f0
AC
1593static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1594{
1bc4ccff 1595 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1596 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1597 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1598 /* Is the speed faster than the drive allows non IORDY ? */
1599 if (pio) {
1600 /* This is cycle times not frequency - watch the logic! */
1601 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1602 return 3 << ATA_SHIFT_PIO;
1603 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1604 }
1605 }
432729f0 1606 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1607}
1608
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY flavor matching the (assumed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian 16-bit words on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2 values 0x37c8/0x738c indicate incomplete ID data from
	 * a drive that powered up in standby (see below) */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
3373efd8 1786static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1787{
9af5c9c9
TH
1788 struct ata_port *ap = dev->link->ap;
1789 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1790}
1791
a6e6ce8e
TH
1792static void ata_dev_config_ncq(struct ata_device *dev,
1793 char *desc, size_t desc_sz)
1794{
9af5c9c9 1795 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1796 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1797
1798 if (!ata_id_has_ncq(dev->id)) {
1799 desc[0] = '\0';
1800 return;
1801 }
75683fe7 1802 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1803 snprintf(desc, desc_sz, "NCQ (not used)");
1804 return;
1805 }
a6e6ce8e 1806 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1807 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1808 dev->flags |= ATA_DFLAG_NCQ;
1809 }
1810
1811 if (hdepth >= ddepth)
1812 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1813 else
1814 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1815}
1816
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59: current multi-sector setting, valid if bit 8 set */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters beyond the
				 * 28-bit LBA boundary */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* unlock a host-protected-area-clipped capacity
			 * unless the drive is blacklisted for it */
			if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
			    ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* device raises an interrupt when ready for the CDB */
		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	/* give the LLD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2086
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2098
1da177e4
LT
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Failed devices are retried up to ATA_PROBE_MAX_TRIES
 *	times (with progressively lowered link speed / transfer mode)
 *	before being disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* phy_reset may have marked the port disabled; in that case the
	 * class reported for each device is discarded.
	 */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through - apply the last-chance slowdown below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2224
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* "enabled" simply means the DISABLED flag is not set */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2240
3be680b7
TH
2241/**
2242 * sata_print_link_status - Print SATA link status
936fd732 2243 * @link: SATA link to printk link status about
3be680b7
TH
2244 *
2245 * This function prints link speed and status of a SATA link.
2246 *
2247 * LOCKING:
2248 * None.
2249 */
936fd732 2250void sata_print_link_status(struct ata_link *link)
3be680b7 2251{
6d5f9732 2252 u32 sstatus, scontrol, tmp;
3be680b7 2253
936fd732 2254 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2255 return;
936fd732 2256 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2257
936fd732 2258 if (ata_link_online(link)) {
3be680b7 2259 tmp = (sstatus >> 4) & 0xf;
936fd732 2260 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2261 "SATA link up %s (SStatus %X SControl %X)\n",
2262 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2263 } else {
936fd732 2264 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2265 "SATA link down (SStatus %X SControl %X)\n",
2266 sstatus, scontrol);
3be680b7
TH
2267 }
2268}
2269
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is marked
 *	probed and @ap->cbl is set to ATA_CBL_SATA; otherwise the
 *	port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (DET = 1 in SControl) */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset (DET = 0) */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * device presence detected but phy not yet established */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to go !BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2325
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.  The bus reset is skipped if the
 *	phy reset left the port disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2344
ebdfca6e
AC
2345/**
2346 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2347 * @adev: device
2348 *
2349 * Obtain the other device on the same cable, or if none is
2350 * present NULL is returned
2351 */
2e9edbf8 2352
3373efd8 2353struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2354{
9af5c9c9
TH
2355 struct ata_link *link = adev->link;
2356 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2357 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2358 return NULL;
2359 return pair;
2360}
2361
1da177e4 2362/**
780a87f7
JG
2363 * ata_port_disable - Disable port.
2364 * @ap: Port to be disabled.
1da177e4 2365 *
780a87f7
JG
2366 * Modify @ap data structure such that the system
2367 * thinks that the entire port is disabled, and should
2368 * never attempt to probe or communicate with devices
2369 * on this port.
2370 *
cca3974e 2371 * LOCKING: host lock, or some other form of
780a87f7 2372 * serialization.
1da177e4
LT
2373 */
2374
2375void ata_port_disable(struct ata_port *ap)
2376{
9af5c9c9
TH
2377 ap->link.device[0].class = ATA_DEV_NONE;
2378 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2379 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2380}
2381
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;	/* already at the lowest speed */

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2440
936fd732 2441static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2442{
2443 u32 spd, limit;
2444
936fd732 2445 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2446 limit = 0;
2447 else
936fd732 2448 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2449
2450 spd = (*scontrol >> 4) & 0xf;
2451 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2452
2453 return spd != limit;
2454}
2455
2456/**
3c567b7d 2457 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2458 * @link: Link in question
1c3fae4d
TH
2459 *
2460 * Test whether the spd limit in SControl matches
936fd732 2461 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2462 * whether hardreset is necessary to apply SATA spd
2463 * configuration.
2464 *
2465 * LOCKING:
2466 * Inherited from caller.
2467 *
2468 * RETURNS:
2469 * 1 if SATA spd configuration is needed, 0 otherwise.
2470 */
936fd732 2471int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2472{
2473 u32 scontrol;
2474
936fd732 2475 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2476 return 0;
2477
936fd732 2478 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2479}
2480
2481/**
3c567b7d 2482 * sata_set_spd - set SATA spd according to spd limit
936fd732 2483 * @link: Link to set SATA spd for
1c3fae4d 2484 *
936fd732 2485 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2486 *
2487 * LOCKING:
2488 * Inherited from caller.
2489 *
2490 * RETURNS:
2491 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2492 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2493 */
936fd732 2494int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2495{
2496 u32 scontrol;
81952c54 2497 int rc;
1c3fae4d 2498
936fd732 2499 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2500 return rc;
1c3fae4d 2501
936fd732 2502 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2503 return 0;
2504
936fd732 2505 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2506 return rc;
2507
1c3fae4d
TH
2508 return 1;
2509}
2510
452503f9
AC
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Column order matches ata_timing_quantize():
 *   mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	/* 0xFF terminates the table (see ata_timing_find_mode()) */
	{ 0xFF }
};
2559
/* round v up to a whole number of 'unit's; EZ() maps 0 to 0 (field unused) */
#define ENOUGH(v,unit)	(((v)-1)/(unit)+1)
#define EZ(v,unit)	((v)?ENOUGH(v,unit):0)

/*
 * Convert timing values in @t from nanoseconds to bus clock counts in @q,
 * using clock period T (in ps) for PIO/MWDMA fields and UT for the UDMA
 * field, rounding each up.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2574
/*
 * Merge timings @a and @b into @m, taking the slower (larger) value for
 * each field selected by the @what bitmask.  @m may alias @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2587
2588static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2589{
2590 const struct ata_timing *t;
2591
2592 for (t = ata_timing; t->mode != speed; t++)
91190758 2593 if (t->mode == 0xFF)
452503f9 2594 return NULL;
2e9edbf8 2595 return t;
452503f9
AC
2596}
2597
/*
 * Compute the timing parameters for @adev at transfer mode @speed into @t,
 * quantized to bus clock periods T (PIO/MWDMA) and UT (UDMA), both in ps.
 * Returns 0 on success, -EINVAL if @speed is not in the timing table.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2668
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET may be or'd into @sel to suppress the warning */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode; UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* PIO must remain possible; fail too if nothing actually changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2757
/*
 * Issue SET FEATURES - XFER MODE to @dev for its selected xfer mode and
 * revalidate the device afterwards.  Known-benign SET_XFERMODE device
 * errors (old CFA, ancient PIO0-2-only devices) are ignored.
 * Returns 0 on success, -EIO or revalidation errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* tell EH-side revalidation that a mode change just happened */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2796
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out paramter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best PIO and DMA modes from the masks */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2891
04351821
A
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out paramter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(link, r_failed_dev);
	return ata_do_set_mode(link, r_failed_dev);
}
2916
1fdffbce
JG
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load taskfile registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2936
1da177e4
LT
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (warn after this long)
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* status 0xff means no device is present; stop polling early */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the impatience timeout - warn but keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
2991
d4b2bab4
TH
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* 0xff with the link offline means nothing is attached */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if it takes >5s and there is still time left */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3033
/*
 * Post-reset processing: wait for each device found by ata_devchk (bits in
 * @devmask) to clear BSY before @deadline.  A missing device (-ENODEV) is
 * recorded in the return value but does not abort waiting for the other
 * device; any other error is returned immediately.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3093
/*
 *	ata_bus_softreset - issue an ATA software reset (SRST) on the bus
 *	@ap: port to reset
 *	@devmask: mask of devices expected on the bus
 *	@deadline: deadline jiffies for the operation
 *
 *	Pulses SRST in the device control register, waits out the
 *	spec-mandated delay, checks for a floating bus, then hands off
 *	to ata_bus_post_reset() to wait for the devices.
 *	Returns 0 on success, -errno otherwise.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3129
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV is tolerated, classification below
	 * will notice the missing device(s)
	 */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 indicates device 1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
3217
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is min(@deadline, now + timeout) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET == 1 only counts as stable once the
			 * deadline has passed (stuck-at-1 workaround)
			 */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3286
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep SPD field, set DET = 0 (no action), IPM = 3 (disable
	 * partial/slumber power management)
	 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3322
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: controllers which need hardreset to
	 * resume the link escalate the requested action here
	 */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* not ready (other than "nothing there"): escalate to
		 * hardreset rather than failing prereset
		 */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3379
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing to reset, report no device */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * err == 0x81 indicates device 1 failed diagnostics
	 */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3437
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable phy, IPM = 3: no power management */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3497
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3555
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear: write back what we read) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3600
623a3128
TH
3601/**
3602 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3603 * @dev: device to compare against
3604 * @new_class: class of the new device
3605 * @new_id: IDENTIFY page of the new device
3606 *
3607 * Compare @new_class and @new_id against @dev and determine
3608 * whether @dev is the device indicated by @new_class and
3609 * @new_id.
3610 *
3611 * LOCKING:
3612 * None.
3613 *
3614 * RETURNS:
3615 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3616 */
3373efd8
TH
3617static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3618 const u16 *new_id)
623a3128
TH
3619{
3620 const u16 *old_id = dev->id;
a0cf733b
TH
3621 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3622 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3623
3624 if (dev->class != new_class) {
f15a1daf
TH
3625 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3626 dev->class, new_class);
623a3128
TH
3627 return 0;
3628 }
3629
a0cf733b
TH
3630 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3631 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3632 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3633 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3634
3635 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3636 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3637 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3638 return 0;
3639 }
3640
3641 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3642 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3643 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3644 return 0;
3645 }
3646
623a3128
TH
3647 return 1;
3648}
3649
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch buffer so dev->id stays intact on failure */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	/* same device confirmed - commit the fresh ID data */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3682
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;		/* capacity before revalidation */
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed; a changed capacity means
	 * this is a different (or differently-configured) disk
	 */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3736
/* One blacklist entry: model (and optionally firmware revision) string
 * matched against IDENTIFY data, plus the horkage flags to apply.
 * A NULL model_rev matches any firmware revision.
 */
struct ata_blacklist_entry {
	const char	*model_num;
	const char	*model_rev;
	unsigned long	horkage;
};

/* NULL-model_num-terminated table scanned by ata_dev_blacklisted() */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA  ZIP 250       ATAPI       Floppy",
				NULL,		ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",     "BANC1G10",    ATA_HORKAGE_NONCQ },
	{ "Maxtor 6B200M0",	"BANC1BM0",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ, },
	{ "Maxtor 7B300S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
	  ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.AD",		ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* End Marker */
	{ }
};
2e9edbf8 3824
75683fe7 3825static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3826{
8bfa79fc
TH
3827 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3828 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3829 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3830
8bfa79fc
TH
3831 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3832 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3833
6919a0a6 3834 while (ad->model_num) {
8bfa79fc 3835 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3836 if (ad->model_rev == NULL)
3837 return ad->horkage;
8bfa79fc 3838 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3839 return ad->horkage;
f4b15fef 3840 }
6919a0a6 3841 ad++;
f4b15fef 3842 }
1da177e4
LT
3843 return 0;
3844}
3845
6919a0a6
AC
3846static int ata_dma_blacklisted(const struct ata_device *dev)
3847{
3848 /* We don't support polling DMA.
3849 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3850 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3851 */
9af5c9c9 3852 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3853 (dev->flags & ATA_DFLAG_CDB_INTR))
3854 return 1;
75683fe7 3855 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3856}
3857
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex hosts allow DMA on only one port at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to apply controller-specific limits */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if((ap->cbl == ATA_CBL_PATA40) ||
   		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
     		      ap->cbl == ATA_CBL_PATA80))) {
		      	ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3938
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* desired transfer mode goes in the sector count register */
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
3976
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	Program the CHS translation geometry of an (old) ATA drive.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4019
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	and undo the padding/trimming done at ata_sg_setup time.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg: re-add the pad bytes trimmed off at setup */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4077
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Segments that
 *	cross a 64K physical boundary are split, since a single PRD
 *	entry may not span one.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* length field of 0x0000 encodes a full 64K per spec */
			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last populated entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4129
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says.
				   Emit two 32K entries instead: this entry gets
				   32K, and the following one (filled just below)
				   covers the remaining 32K. */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last populated entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4191
1da177e4
LT
4192/**
4193 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4194 * @qc: Metadata associated with taskfile to check
4195 *
780a87f7
JG
4196 * Allow low-level driver to filter ATA PACKET commands, returning
4197 * a status indicating whether or not it is OK to use DMA for the
4198 * supplied PACKET command.
4199 *
1da177e4 4200 * LOCKING:
cca3974e 4201 * spin_lock_irqsave(host lock)
0cba632b 4202 *
1da177e4
LT
4203 * RETURNS: 0 when ATAPI DMA can be used
4204 * nonzero otherwise
4205 */
4206int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4207{
4208 struct ata_port *ap = qc->ap;
b9a4197e
TH
4209
4210 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4211 * few ATAPI devices choke on such DMA requests.
4212 */
4213 if (unlikely(qc->nbytes & 15))
4214 return 1;
6f23a31d 4215
1da177e4 4216 if (ap->ops->check_atapi_dma)
b9a4197e 4217 return ap->ops->check_atapi_dma(qc);
1da177e4 4218
b9a4197e 4219 return 0;
1da177e4 4220}
b9a4197e 4221
1da177e4
LT
4222/**
4223 * ata_qc_prep - Prepare taskfile for submission
4224 * @qc: Metadata associated with taskfile to be prepared
4225 *
780a87f7
JG
4226 * Prepare ATA taskfile for submission.
4227 *
1da177e4 4228 * LOCKING:
cca3974e 4229 * spin_lock_irqsave(host lock)
1da177e4
LT
4230 */
4231void ata_qc_prep(struct ata_queued_cmd *qc)
4232{
4233 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4234 return;
4235
4236 ata_fill_sg(qc);
4237}
4238
d26fc955
AC
4239/**
4240 * ata_dumb_qc_prep - Prepare taskfile for submission
4241 * @qc: Metadata associated with taskfile to be prepared
4242 *
4243 * Prepare ATA taskfile for submission.
4244 *
4245 * LOCKING:
4246 * spin_lock_irqsave(host lock)
4247 */
4248void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4249{
4250 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4251 return;
4252
4253 ata_fill_sg_dumb(qc);
4254}
4255
/* qc_prep hook for controllers that need no PRD/setup work at all */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4257
0cba632b
JG
4258/**
4259 * ata_sg_init_one - Associate command with memory buffer
4260 * @qc: Command to be associated
4261 * @buf: Memory buffer
4262 * @buflen: Length of memory buffer, in bytes.
4263 *
4264 * Initialize the data-related elements of queued_cmd @qc
4265 * to point to a single memory buffer, @buf of byte length @buflen.
4266 *
4267 * LOCKING:
cca3974e 4268 * spin_lock_irqsave(host lock)
0cba632b
JG
4269 */
4270
1da177e4
LT
4271void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4272{
1da177e4
LT
4273 qc->flags |= ATA_QCFLAG_SINGLE;
4274
cedc9a47 4275 qc->__sg = &qc->sgent;
1da177e4 4276 qc->n_elem = 1;
cedc9a47 4277 qc->orig_n_elem = 1;
1da177e4 4278 qc->buf_virt = buf;
233277ca 4279 qc->nbytes = buflen;
1da177e4 4280
61c0596c 4281 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4282}
4283
0cba632b
JG
4284/**
4285 * ata_sg_init - Associate command with scatter-gather table.
4286 * @qc: Command to be associated
4287 * @sg: Scatter-gather table.
4288 * @n_elem: Number of elements in s/g table.
4289 *
4290 * Initialize the data-related elements of queued_cmd @qc
4291 * to point to a scatter-gather table @sg, containing @n_elem
4292 * elements.
4293 *
4294 * LOCKING:
cca3974e 4295 * spin_lock_irqsave(host lock)
0cba632b
JG
4296 */
4297
1da177e4
LT
4298void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4299 unsigned int n_elem)
4300{
4301 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4302 qc->__sg = sg;
1da177e4 4303 qc->n_elem = n_elem;
cedc9a47 4304 qc->orig_n_elem = n_elem;
1da177e4
LT
4305}
4306
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc,
 *	padding the transfer out to a 32-bit boundary if needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands are ever padded here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the tail bytes in the pad buffer;
		 * for reads they are copied back in ata_sg_clean()
		 */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* whole transfer fits in the pad buffer; nothing to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg so the caller sees the original length */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4375
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc,
 *	padding the last segment out to a 32-bit boundary if needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands are ever padded here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last segment if padding consumed it entirely */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg so the caller sees the original length */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4460
0baab86b 4461/**
c893a3ae 4462 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4463 * @buf: Buffer to swap
4464 * @buf_words: Number of 16-bit words in buffer.
4465 *
4466 * Swap halves of 16-bit words if needed to convert from
4467 * little-endian byte order to native cpu byte order, or
4468 * vice-versa.
4469 *
4470 * LOCKING:
6f0ef4fa 4471 * Inherited from caller.
0baab86b 4472 */
1da177e4
LT
4473void swap_buf_le16(u16 *buf, unsigned int buf_words)
4474{
4475#ifdef __BIG_ENDIAN
4476 unsigned int i;
4477
4478 for (i = 0; i < buf_words; i++)
4479 buf[i] = le16_to_cpu(buf[i]);
4480#endif /* __BIG_ENDIAN */
4481}
4482
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any: bounce the odd byte
	 * through a 16-bit staging word so the data register always
	 * sees full-word accesses.
	 */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
4521
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* mask local interrupts for the duration of the PIO burst */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4543
4544
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> HSM goes to the LAST state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* advance to the next sg entry once this one is consumed */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 4601
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE: transfer up to multi_count
		 * sectors per DRQ block, bounded by what remains
		 */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}
4630
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then
 *	set the next HSM state according to the command protocol.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4666
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk finishes the command -> HSM goes to the LAST state */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one word at a time through the bounce word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* advance to the next sg entry once this one is consumed */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
4761
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer Transfer data from/to the ATAPI device.
 *	Reads the byte count and interrupt-reason registers from the
 *	device, validates the transfer phase, then moves the data.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* device-reported byte count for this DRQ block */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4812
4813/**
c234fb00
AL
4814 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4815 * @ap: the target ata_port
4816 * @qc: qc on going
1da177e4 4817 *
c234fb00
AL
4818 * RETURNS:
4819 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4820 */
c234fb00
AL
4821
4822static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4823{
c234fb00
AL
4824 if (qc->tf.flags & ATA_TFLAG_POLLING)
4825 return 1;
1da177e4 4826
c234fb00
AL
4827 if (ap->hsm_task_state == HSM_ST_FIRST) {
4828 if (qc->tf.protocol == ATA_PROT_PIO &&
4829 (qc->tf.flags & ATA_TFLAG_WRITE))
4830 return 1;
1da177e4 4831
c234fb00
AL
4832 if (is_atapi_taskfile(&qc->tf) &&
4833 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4834 return 1;
fe79e683
AL
4835 }
4836
c234fb00
AL
4837 return 0;
4838}
1da177e4 4839
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.  Re-fetch the qc by tag; it may be gone.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable interrupts before completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation -> freeze port for EH */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* legacy path: no new-style error handler available */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
4889
bb5cb290
AL
4890/**
4891 * ata_hsm_move - move the HSM to the next state.
4892 * @ap: the target ata_port
4893 * @qc: qc on going
4894 * @status: current device status
4895 * @in_wq: 1 if called from workqueue, 0 otherwise
4896 *
4897 * RETURNS:
4898 * 1 when poll next status needed, 0 otherwise.
4899 */
9a1004d0
TH
4900int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4901 u8 status, int in_wq)
e2cec771 4902{
bb5cb290
AL
4903 unsigned long flags = 0;
4904 int poll_next;
4905
6912ccd5
AL
4906 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4907
bb5cb290
AL
4908 /* Make sure ata_qc_issue_prot() does not throw things
4909 * like DMA polling into the workqueue. Notice that
4910 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4911 */
c234fb00 4912 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4913
e2cec771 4914fsm_start:
999bb6f4 4915 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4916 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4917
e2cec771
AL
4918 switch (ap->hsm_task_state) {
4919 case HSM_ST_FIRST:
bb5cb290
AL
4920 /* Send first data block or PACKET CDB */
4921
4922 /* If polling, we will stay in the work queue after
4923 * sending the data. Otherwise, interrupt handler
4924 * takes over after sending the data.
4925 */
4926 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4927
e2cec771 4928 /* check device status */
3655d1d3
AL
4929 if (unlikely((status & ATA_DRQ) == 0)) {
4930 /* handle BSY=0, DRQ=0 as error */
4931 if (likely(status & (ATA_ERR | ATA_DF)))
4932 /* device stops HSM for abort/error */
4933 qc->err_mask |= AC_ERR_DEV;
4934 else
4935 /* HSM violation. Let EH handle this */
4936 qc->err_mask |= AC_ERR_HSM;
4937
14be71f4 4938 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4939 goto fsm_start;
1da177e4
LT
4940 }
4941
71601958
AL
4942 /* Device should not ask for data transfer (DRQ=1)
4943 * when it finds something wrong.
eee6c32f
AL
4944 * We ignore DRQ here and stop the HSM by
4945 * changing hsm_task_state to HSM_ST_ERR and
4946 * let the EH abort the command or reset the device.
71601958
AL
4947 */
4948 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4949 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4950 "error, dev_stat 0x%X\n", status);
3655d1d3 4951 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4952 ap->hsm_task_state = HSM_ST_ERR;
4953 goto fsm_start;
71601958 4954 }
1da177e4 4955
bb5cb290
AL
4956 /* Send the CDB (atapi) or the first data block (ata pio out).
4957 * During the state transition, interrupt handler shouldn't
4958 * be invoked before the data transfer is complete and
4959 * hsm_task_state is changed. Hence, the following locking.
4960 */
4961 if (in_wq)
ba6a1308 4962 spin_lock_irqsave(ap->lock, flags);
1da177e4 4963
bb5cb290
AL
4964 if (qc->tf.protocol == ATA_PROT_PIO) {
4965 /* PIO data out protocol.
4966 * send first data block.
4967 */
0565c26d 4968
bb5cb290
AL
4969 /* ata_pio_sectors() might change the state
4970 * to HSM_ST_LAST. so, the state is changed here
4971 * before ata_pio_sectors().
4972 */
4973 ap->hsm_task_state = HSM_ST;
4974 ata_pio_sectors(qc);
bb5cb290
AL
4975 } else
4976 /* send CDB */
4977 atapi_send_cdb(ap, qc);
4978
4979 if (in_wq)
ba6a1308 4980 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4981
4982 /* if polling, ata_pio_task() handles the rest.
4983 * otherwise, interrupt handler takes over from here.
4984 */
e2cec771 4985 break;
1c848984 4986
e2cec771
AL
4987 case HSM_ST:
4988 /* complete command or read/write the data register */
4989 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4990 /* ATAPI PIO protocol */
4991 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4992 /* No more data to transfer or device error.
4993 * Device error will be tagged in HSM_ST_LAST.
4994 */
e2cec771
AL
4995 ap->hsm_task_state = HSM_ST_LAST;
4996 goto fsm_start;
4997 }
1da177e4 4998
71601958
AL
4999 /* Device should not ask for data transfer (DRQ=1)
5000 * when it finds something wrong.
eee6c32f
AL
5001 * We ignore DRQ here and stop the HSM by
5002 * changing hsm_task_state to HSM_ST_ERR and
5003 * let the EH abort the command or reset the device.
71601958
AL
5004 */
5005 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5006 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5007 "device error, dev_stat 0x%X\n",
5008 status);
3655d1d3 5009 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5010 ap->hsm_task_state = HSM_ST_ERR;
5011 goto fsm_start;
71601958 5012 }
1da177e4 5013
e2cec771 5014 atapi_pio_bytes(qc);
7fb6ec28 5015
e2cec771
AL
5016 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5017 /* bad ireason reported by device */
5018 goto fsm_start;
1da177e4 5019
e2cec771
AL
5020 } else {
5021 /* ATA PIO protocol */
5022 if (unlikely((status & ATA_DRQ) == 0)) {
5023 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
5024 if (likely(status & (ATA_ERR | ATA_DF)))
5025 /* device stops HSM for abort/error */
5026 qc->err_mask |= AC_ERR_DEV;
5027 else
55a8e2c8
TH
5028 /* HSM violation. Let EH handle this.
5029 * Phantom devices also trigger this
5030 * condition. Mark hint.
5031 */
5032 qc->err_mask |= AC_ERR_HSM |
5033 AC_ERR_NODEV_HINT;
3655d1d3 5034
e2cec771
AL
5035 ap->hsm_task_state = HSM_ST_ERR;
5036 goto fsm_start;
5037 }
1da177e4 5038
eee6c32f
AL
5039 /* For PIO reads, some devices may ask for
5040 * data transfer (DRQ=1) alone with ERR=1.
5041 * We respect DRQ here and transfer one
5042 * block of junk data before changing the
5043 * hsm_task_state to HSM_ST_ERR.
5044 *
5045 * For PIO writes, ERR=1 DRQ=1 doesn't make
5046 * sense since the data block has been
5047 * transferred to the device.
71601958
AL
5048 */
5049 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
5050 /* data might be corrputed */
5051 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
5052
5053 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5054 ata_pio_sectors(qc);
eee6c32f
AL
5055 status = ata_wait_idle(ap);
5056 }
5057
3655d1d3
AL
5058 if (status & (ATA_BUSY | ATA_DRQ))
5059 qc->err_mask |= AC_ERR_HSM;
5060
eee6c32f
AL
5061 /* ata_pio_sectors() might change the
5062 * state to HSM_ST_LAST. so, the state
5063 * is changed after ata_pio_sectors().
5064 */
5065 ap->hsm_task_state = HSM_ST_ERR;
5066 goto fsm_start;
71601958
AL
5067 }
5068
e2cec771
AL
5069 ata_pio_sectors(qc);
5070
5071 if (ap->hsm_task_state == HSM_ST_LAST &&
5072 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5073 /* all data read */
52a32205 5074 status = ata_wait_idle(ap);
e2cec771
AL
5075 goto fsm_start;
5076 }
5077 }
5078
bb5cb290 5079 poll_next = 1;
1da177e4
LT
5080 break;
5081
14be71f4 5082 case HSM_ST_LAST:
6912ccd5
AL
5083 if (unlikely(!ata_ok(status))) {
5084 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5085 ap->hsm_task_state = HSM_ST_ERR;
5086 goto fsm_start;
5087 }
5088
5089 /* no more data to transfer */
4332a771 5090 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5091 ap->print_id, qc->dev->devno, status);
e2cec771 5092
6912ccd5
AL
5093 WARN_ON(qc->err_mask);
5094
e2cec771 5095 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5096
e2cec771 5097 /* complete taskfile transaction */
c17ea20d 5098 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5099
5100 poll_next = 0;
1da177e4
LT
5101 break;
5102
14be71f4 5103 case HSM_ST_ERR:
e2cec771
AL
5104 /* make sure qc->err_mask is available to
5105 * know what's wrong and recover
5106 */
5107 WARN_ON(qc->err_mask == 0);
5108
5109 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5110
999bb6f4 5111 /* complete taskfile transaction */
c17ea20d 5112 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5113
5114 poll_next = 0;
e2cec771
AL
5115 break;
5116 default:
bb5cb290 5117 poll_next = 0;
6912ccd5 5118 BUG();
1da177e4
LT
5119 }
5120
bb5cb290 5121 return poll_next;
1da177e4
LT
5122}
5123
/* ata_pio_task - workqueue handler that drives the PIO host state machine
 *
 * Fetches the in-flight qc stashed in ap->port_task_data, waits (briefly,
 * possibly sleeping) for BSY to clear, then advances the HSM via
 * ata_hsm_move() with in_wq=1.  Reschedules itself while the HSM says
 * polling should continue.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy -- retry later from the workqueue */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM; in_wq=1 marks workqueue (sleepable) context */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5161
1da177e4
LT
5162/**
5163 * ata_qc_new - Request an available ATA command, for queueing
5164 * @ap: Port associated with device @dev
5165 * @dev: Device from whom we request an available command structure
5166 *
5167 * LOCKING:
0cba632b 5168 * None.
1da177e4
LT
5169 */
5170
5171static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5172{
5173 struct ata_queued_cmd *qc = NULL;
5174 unsigned int i;
5175
e3180499 5176 /* no command while frozen */
b51e9e5d 5177 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5178 return NULL;
5179
2ab7db1f
TH
5180 /* the last tag is reserved for internal command. */
5181 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5182 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5183 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5184 break;
5185 }
5186
5187 if (qc)
5188 qc->tag = i;
5189
5190 return qc;
5191}
5192
5193/**
5194 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5195 * @dev: Device from whom we request an available command structure
5196 *
5197 * LOCKING:
0cba632b 5198 * None.
1da177e4
LT
5199 */
5200
3373efd8 5201struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5202{
9af5c9c9 5203 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5204 struct ata_queued_cmd *qc;
5205
5206 qc = ata_qc_new(ap);
5207 if (qc) {
1da177e4
LT
5208 qc->scsicmd = NULL;
5209 qc->ap = ap;
5210 qc->dev = dev;
1da177e4 5211
2c13b7ce 5212 ata_qc_reinit(qc);
1da177e4
LT
5213 }
5214
5215 return qc;
5216}
5217
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.  Clears the qc's flags,
 *	poisons its tag, and releases the tag bit so the slot can be
 *	reallocated.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing the slot bit */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
5242
/* __ata_qc_complete - low-level qc completion
 *
 * Unmaps DMA, clears the NCQ sactive bit or the link's active tag,
 * drops ATA_QCFLAG_ACTIVE and the port's qc_active bit, then invokes
 * the qc's completion callback.  No result-TF reading or EH policy
 * here -- callers (ata_qc_complete and EH) handle that.
 * Caller holds the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		link->sactive &= ~(1 << qc->tag);
	else
		link->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5270
39599a53
TH
5271static void fill_result_tf(struct ata_queued_cmd *qc)
5272{
5273 struct ata_port *ap = qc->ap;
5274
39599a53 5275 qc->result_tf.flags = qc->tf.flags;
4742d54f 5276 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5277}
5278
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *	(NOTE: the former @err_mask parameter documentation was stale;
 *	error state travels in qc->err_mask.)
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5338
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that changed between old and new active masks */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit may only go active->inactive here; the reverse means
	 * the LLDD reported a tag becoming active behind our back
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
5387
1da177e4
LT
5388static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5389{
5390 struct ata_port *ap = qc->ap;
5391
5392 switch (qc->tf.protocol) {
3dc1d881 5393 case ATA_PROT_NCQ:
1da177e4
LT
5394 case ATA_PROT_DMA:
5395 case ATA_PROT_ATAPI_DMA:
5396 return 1;
5397
5398 case ATA_PROT_ATAPI:
5399 case ATA_PROT_PIO:
1da177e4
LT
5400 if (ap->flags & ATA_FLAG_PIO_DMA)
5401 return 1;
5402
5403 /* fall through */
5404
5405 default:
5406 return 0;
5407 }
5408
5409 /* never reached */
5410}
5411
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *	On failure the qc is completed with an error mask set.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* track the command as active on the link */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map data buffers if the protocol needs DMA */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* complete immediately with the accumulated error mask */
	ata_qc_complete(qc);
}
5472/**
5473 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5474 * @qc: command to issue to device
5475 *
5476 * Using various libata functions and hooks, this function
5477 * starts an ATA command. ATA commands are grouped into
5478 * classes called "protocols", and issuing each type of protocol
5479 * is slightly different.
5480 *
0baab86b
EF
5481 * May be used as the qc_issue() entry in ata_port_operations.
5482 *
1da177e4 5483 * LOCKING:
cca3974e 5484 * spin_lock_irqsave(host lock)
1da177e4
LT
5485 *
5486 * RETURNS:
9a3d9eb0 5487 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5488 */
5489
9a3d9eb0 5490unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5491{
5492 struct ata_port *ap = qc->ap;
5493
e50362ec
AL
5494 /* Use polling pio if the LLD doesn't handle
5495 * interrupt driven pio and atapi CDB interrupt.
5496 */
5497 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5498 switch (qc->tf.protocol) {
5499 case ATA_PROT_PIO:
e3472cbe 5500 case ATA_PROT_NODATA:
e50362ec
AL
5501 case ATA_PROT_ATAPI:
5502 case ATA_PROT_ATAPI_NODATA:
5503 qc->tf.flags |= ATA_TFLAG_POLLING;
5504 break;
5505 case ATA_PROT_ATAPI_DMA:
5506 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5507 /* see ata_dma_blacklisted() */
e50362ec
AL
5508 BUG();
5509 break;
5510 default:
5511 break;
5512 }
5513 }
5514
312f7da2 5515 /* select the device */
1da177e4
LT
5516 ata_dev_select(ap, qc->dev->devno, 1, 0);
5517
312f7da2 5518 /* start the command */
1da177e4
LT
5519 switch (qc->tf.protocol) {
5520 case ATA_PROT_NODATA:
312f7da2
AL
5521 if (qc->tf.flags & ATA_TFLAG_POLLING)
5522 ata_qc_set_polling(qc);
5523
e5338254 5524 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5525 ap->hsm_task_state = HSM_ST_LAST;
5526
5527 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5528 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5529
1da177e4
LT
5530 break;
5531
5532 case ATA_PROT_DMA:
587005de 5533 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5534
1da177e4
LT
5535 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5536 ap->ops->bmdma_setup(qc); /* set up bmdma */
5537 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5538 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5539 break;
5540
312f7da2
AL
5541 case ATA_PROT_PIO:
5542 if (qc->tf.flags & ATA_TFLAG_POLLING)
5543 ata_qc_set_polling(qc);
1da177e4 5544
e5338254 5545 ata_tf_to_host(ap, &qc->tf);
312f7da2 5546
54f00389
AL
5547 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5548 /* PIO data out protocol */
5549 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5550 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5551
5552 /* always send first data block using
e27486db 5553 * the ata_pio_task() codepath.
54f00389 5554 */
312f7da2 5555 } else {
54f00389
AL
5556 /* PIO data in protocol */
5557 ap->hsm_task_state = HSM_ST;
5558
5559 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5560 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5561
5562 /* if polling, ata_pio_task() handles the rest.
5563 * otherwise, interrupt handler takes over from here.
5564 */
312f7da2
AL
5565 }
5566
1da177e4
LT
5567 break;
5568
1da177e4 5569 case ATA_PROT_ATAPI:
1da177e4 5570 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5571 if (qc->tf.flags & ATA_TFLAG_POLLING)
5572 ata_qc_set_polling(qc);
5573
e5338254 5574 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5575
312f7da2
AL
5576 ap->hsm_task_state = HSM_ST_FIRST;
5577
5578 /* send cdb by polling if no cdb interrupt */
5579 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5580 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5581 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5582 break;
5583
5584 case ATA_PROT_ATAPI_DMA:
587005de 5585 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5586
1da177e4
LT
5587 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5588 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5589 ap->hsm_task_state = HSM_ST_FIRST;
5590
5591 /* send cdb by polling if no cdb interrupt */
5592 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5593 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5594 break;
5595
5596 default:
5597 WARN_ON(1);
9a3d9eb0 5598 return AC_ERR_SYSTEM;
1da177e4
LT
5599 }
5600
5601 return 0;
5602}
5603
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus -- reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the HSM from interrupt context (in_wq=0) */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5704
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch when an interrupt-driven (non-polling)
			 * command is active on this port
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
5749
34bf2170
TH
5750/**
5751 * sata_scr_valid - test whether SCRs are accessible
936fd732 5752 * @link: ATA link to test SCR accessibility for
34bf2170 5753 *
936fd732 5754 * Test whether SCRs are accessible for @link.
34bf2170
TH
5755 *
5756 * LOCKING:
5757 * None.
5758 *
5759 * RETURNS:
5760 * 1 if SCRs are accessible, 0 otherwise.
5761 */
936fd732 5762int sata_scr_valid(struct ata_link *link)
34bf2170 5763{
936fd732
TH
5764 struct ata_port *ap = link->ap;
5765
a16abc0b 5766 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5767}
5768
5769/**
5770 * sata_scr_read - read SCR register of the specified port
936fd732 5771 * @link: ATA link to read SCR for
34bf2170
TH
5772 * @reg: SCR to read
5773 * @val: Place to store read value
5774 *
936fd732 5775 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5776 * guaranteed to succeed if the cable type of the port is SATA
5777 * and the port implements ->scr_read.
5778 *
5779 * LOCKING:
5780 * None.
5781 *
5782 * RETURNS:
5783 * 0 on success, negative errno on failure.
5784 */
936fd732 5785int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5786{
936fd732
TH
5787 struct ata_port *ap = link->ap;
5788
5789 if (sata_scr_valid(link))
da3dbb17 5790 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5791 return -EOPNOTSUPP;
5792}
5793
5794/**
5795 * sata_scr_write - write SCR register of the specified port
936fd732 5796 * @link: ATA link to write SCR for
34bf2170
TH
5797 * @reg: SCR to write
5798 * @val: value to write
5799 *
936fd732 5800 * Write @val to SCR register @reg of @link. This function is
34bf2170
TH
5801 * guaranteed to succeed if the cable type of the port is SATA
5802 * and the port implements ->scr_read.
5803 *
5804 * LOCKING:
5805 * None.
5806 *
5807 * RETURNS:
5808 * 0 on success, negative errno on failure.
5809 */
936fd732 5810int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5811{
936fd732
TH
5812 struct ata_port *ap = link->ap;
5813
5814 if (sata_scr_valid(link))
da3dbb17 5815 return ap->ops->scr_write(ap, reg, val);
34bf2170
TH
5816 return -EOPNOTSUPP;
5817}
5818
5819/**
5820 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5821 * @link: ATA link to write SCR for
34bf2170
TH
5822 * @reg: SCR to write
5823 * @val: value to write
5824 *
5825 * This function is identical to sata_scr_write() except that this
5826 * function performs flush after writing to the register.
5827 *
5828 * LOCKING:
5829 * None.
5830 *
5831 * RETURNS:
5832 * 0 on success, negative errno on failure.
5833 */
936fd732 5834int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5835{
936fd732 5836 struct ata_port *ap = link->ap;
da3dbb17
TH
5837 int rc;
5838
936fd732 5839 if (sata_scr_valid(link)) {
da3dbb17
TH
5840 rc = ap->ops->scr_write(ap, reg, val);
5841 if (rc == 0)
5842 rc = ap->ops->scr_read(ap, reg, &val);
5843 return rc;
34bf2170
TH
5844 }
5845 return -EOPNOTSUPP;
5846}
5847
5848/**
936fd732
TH
5849 * ata_link_online - test whether the given link is online
5850 * @link: ATA link to test
34bf2170 5851 *
936fd732
TH
5852 * Test whether @link is online. Note that this function returns
5853 * 0 if online status of @link cannot be obtained, so
5854 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5855 *
5856 * LOCKING:
5857 * None.
5858 *
5859 * RETURNS:
5860 * 1 if the port online status is available and online.
5861 */
936fd732 5862int ata_link_online(struct ata_link *link)
34bf2170
TH
5863{
5864 u32 sstatus;
5865
936fd732
TH
5866 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5867 (sstatus & 0xf) == 0x3)
34bf2170
TH
5868 return 1;
5869 return 0;
5870}
5871
5872/**
936fd732
TH
5873 * ata_link_offline - test whether the given link is offline
5874 * @link: ATA link to test
34bf2170 5875 *
936fd732
TH
5876 * Test whether @link is offline. Note that this function
5877 * returns 0 if offline status of @link cannot be obtained, so
5878 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5879 *
5880 * LOCKING:
5881 * None.
5882 *
5883 * RETURNS:
5884 * 1 if the port offline status is available and offline.
5885 */
936fd732 5886int ata_link_offline(struct ata_link *link)
34bf2170
TH
5887{
5888 u32 sstatus;
5889
936fd732
TH
5890 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5891 (sstatus & 0xf) != 0x3)
34bf2170
TH
5892 return 1;
5893 return 0;
5894}
0baab86b 5895
/* ata_flush_cache - flush @dev's write cache if flushing is supported
 *
 * Issues FLUSH CACHE EXT when the device requires the 48-bit opcode,
 * otherwise FLUSH CACHE.  Returns 0 on success or when no flush is
 * needed, -EIO when the command fails.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	/* nothing to do if the device can't / needn't flush */
	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
5917
6ffa01d8 5918#ifdef CONFIG_PM
/* ata_host_request_pm - request EH to perform a PM operation on all ports
 * @host: host to operate on
 * @mesg: PM message to deliver
 * @action: EH action to schedule on each link
 * @ehi_flags: EH info flags to set on each link
 * @wait: if non-zero, wait for each port's EH to finish and collect rc
 *
 * Hands the PM request to each port's error handler and optionally
 * waits for completion.  Returns 0 on success or the first port's
 * error code when @wait is set.  May sleep.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5968
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/* synchronous (wait=1), quiet EH run */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
5993
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* asynchronous (wait=0): schedule softreset on every port */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 6011#endif
500530f6 6012
c893a3ae
RD
6013/**
6014 * ata_port_start - Set port up for dma.
6015 * @ap: Port to initialize
6016 *
6017 * Called just after data structures for each port are
6018 * initialized. Allocates space for PRD table.
6019 *
6020 * May be used as the port_start() entry in ata_port_operations.
6021 *
6022 * LOCKING:
6023 * Inherited from caller.
6024 */
f0d36efd 6025int ata_port_start(struct ata_port *ap)
1da177e4 6026{
2f1f610b 6027 struct device *dev = ap->dev;
6037d6bb 6028 int rc;
1da177e4 6029
f0d36efd
TH
6030 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6031 GFP_KERNEL);
1da177e4
LT
6032 if (!ap->prd)
6033 return -ENOMEM;
6034
6037d6bb 6035 rc = ata_pad_alloc(ap, dev);
f0d36efd 6036 if (rc)
6037d6bb 6037 return rc;
1da177e4 6038
f0d36efd
TH
6039 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6040 (unsigned long long)ap->prd_dma);
1da177e4
LT
6041 return 0;
6042}
6043
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Wipe everything past ATA_DEVICE_CLEAR_OFFSET.  Fields before
	 * that offset (link, devno, the flag bits handled above) must
	 * survive re-initialization.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start out fully permissive; probing narrows these masks */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6078
4fb37a25
TH
6079/**
6080 * ata_link_init - Initialize an ata_link structure
6081 * @ap: ATA port link is attached to
6082 * @link: Link structure to initialize
8989805d 6083 * @pmp: Port multiplier port number
4fb37a25
TH
6084 *
6085 * Initialize @link.
6086 *
6087 * LOCKING:
6088 * Kernel thread context (may sleep)
6089 */
8989805d 6090static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6091{
6092 int i;
6093
6094 /* clear everything except for devices */
6095 memset(link, 0, offsetof(struct ata_link, device[0]));
6096
6097 link->ap = ap;
8989805d 6098 link->pmp = pmp;
4fb37a25
TH
6099 link->active_tag = ATA_TAG_POISON;
6100 link->hw_sata_spd_limit = UINT_MAX;
6101
6102 /* can't use iterator, ap isn't initialized yet */
6103 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6104 struct ata_device *dev = &link->device[i];
6105
6106 dev->link = link;
6107 dev->devno = dev - link->device;
6108 ata_dev_init(dev);
6109 }
6110}
6111
6112/**
6113 * sata_link_init_spd - Initialize link->sata_spd_limit
6114 * @link: Link to configure sata_spd_limit for
6115 *
6116 * Initialize @link->[hw_]sata_spd_limit to the currently
6117 * configured value.
6118 *
6119 * LOCKING:
6120 * Kernel thread context (may sleep).
6121 *
6122 * RETURNS:
6123 * 0 on success, -errno on failure.
6124 */
6125static int sata_link_init_spd(struct ata_link *link)
6126{
6127 u32 scontrol, spd;
6128 int rc;
6129
6130 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6131 if (rc)
6132 return rc;
6133
6134 spd = (scontrol >> 4) & 0xf;
6135 if (spd)
6136 link->hw_sata_spd_limit &= (1 << spd) - 1;
6137
6138 link->sata_spd_limit = link->hw_sata_spd_limit;
6139
6140 return 0;
6141}
6142
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* cleared again once registration/probing completes */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	/* all ports share the host lock */
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	/* real id is assigned during ata_host_register() */
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* initialize the host-default link (pmp 0) */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6202
f0d36efd
TH
/*
 * ata_host_release - devres release callback for an ATA host
 * @gendev: generic device the host is attached to
 * @res: devres payload (unused; the host is reached via drvdata)
 *
 * Stops all ports and the host first, then frees per-port
 * resources, so ->port_stop and ->host_stop still see fully
 * valid port structures.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* pass 1: stop ports, but only if the host was actually started */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* pass 2: release SCSI hosts and free the port structures */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6236
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	/* group the devres allocations so they can be rolled back on error */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * the +1 slot stays zeroed and serves as a NULL sentinel
	 * after the last port pointer (ata_host_register relies on it)
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6301
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi hits a NULL entry, keep reusing the last
		 * non-NULL port_info for all remaining ports
		 */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* host-level ops/private_data default to the first
		 * entry that provides them
		 */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6351
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: a second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6405
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Bare-bones host initialization for callers that do not use
 *	ata_host_alloc() (no devres, no port allocation).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6426
f3187195
TH
6427/**
6428 * ata_host_register - register initialized ATA host
6429 * @host: ATA host to register
6430 * @sht: template for SCSI host
6431 *
6432 * Register initialized ATA host. @host is allocated using
6433 * ata_host_alloc() and fully initialized by LLD. This function
6434 * starts ports, registers @host with ATA and SCSI layers and
6435 * probe registered devices.
6436 *
6437 * LOCKING:
6438 * Inherited from calling layer (may sleep).
6439 *
6440 * RETURNS:
6441 * 0 on success, -errno otherwise.
6442 */
6443int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6444{
6445 int i, rc;
6446
6447 /* host must have been started */
6448 if (!(host->flags & ATA_HOST_STARTED)) {
6449 dev_printk(KERN_ERR, host->dev,
6450 "BUG: trying to register unstarted host\n");
6451 WARN_ON(1);
6452 return -EINVAL;
6453 }
6454
6455 /* Blow away unused ports. This happens when LLD can't
6456 * determine the exact number of ports to allocate at
6457 * allocation time.
6458 */
6459 for (i = host->n_ports; host->ports[i]; i++)
6460 kfree(host->ports[i]);
6461
6462 /* give ports names and add SCSI hosts */
6463 for (i = 0; i < host->n_ports; i++)
6464 host->ports[i]->print_id = ata_print_id++;
6465
6466 rc = ata_scsi_add_hosts(host, sht);
6467 if (rc)
6468 return rc;
6469
fafbae87
TH
6470 /* associate with ACPI nodes */
6471 ata_acpi_associate(host);
6472
f3187195
TH
6473 /* set cable, sata_spd_limit and report */
6474 for (i = 0; i < host->n_ports; i++) {
6475 struct ata_port *ap = host->ports[i];
6476 int irq_line;
f3187195
TH
6477 unsigned long xfer_mask;
6478
6479 /* set SATA cable type if still unset */
6480 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6481 ap->cbl = ATA_CBL_SATA;
6482
6483 /* init sata_spd_limit to the current value */
4fb37a25 6484 sata_link_init_spd(&ap->link);
f3187195
TH
6485
6486 /* report the secondary IRQ for second channel legacy */
6487 irq_line = host->irq;
6488 if (i == 1 && host->irq2)
6489 irq_line = host->irq2;
6490
6491 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6492 ap->udma_mask);
6493
6494 /* print per-port info to dmesg */
6495 if (!ata_port_is_dummy(ap))
6496 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6497 "ctl 0x%p bmdma 0x%p irq %d\n",
a16abc0b 6498 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195
TH
6499 ata_mode_string(xfer_mask),
6500 ap->ioaddr.cmd_addr,
6501 ap->ioaddr.ctl_addr,
6502 ap->ioaddr.bmdma_addr,
6503 irq_line);
6504 else
6505 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6506 }
6507
6508 /* perform each probe synchronously */
6509 DPRINTK("probe begin\n");
6510 for (i = 0; i < host->n_ports; i++) {
6511 struct ata_port *ap = host->ports[i];
6512 int rc;
6513
6514 /* probe */
6515 if (ap->ops->error_handler) {
9af5c9c9 6516 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
6517 unsigned long flags;
6518
6519 ata_port_probe(ap);
6520
6521 /* kick EH for boot probing */
6522 spin_lock_irqsave(ap->lock, flags);
6523
f58229f8
TH
6524 ehi->probe_mask =
6525 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
6526 ehi->action |= ATA_EH_SOFTRESET;
6527 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6528
f4d6d004 6529 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6530 ap->pflags |= ATA_PFLAG_LOADING;
6531 ata_port_schedule_eh(ap);
6532
6533 spin_unlock_irqrestore(ap->lock, flags);
6534
6535 /* wait for EH to finish */
6536 ata_port_wait_eh(ap);
6537 } else {
6538 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6539 rc = ata_bus_probe(ap);
6540 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6541
6542 if (rc) {
6543 /* FIXME: do something useful here?
6544 * Current libata behavior will
6545 * tear down everything when
6546 * the module is removed
6547 * or the h/w is unplugged.
6548 */
6549 }
6550 }
6551 }
6552
6553 /* probes are done, now scan each port's disk(s) */
6554 DPRINTK("host probe begin\n");
6555 for (i = 0; i < host->n_ports; i++) {
6556 struct ata_port *ap = host->ports[i];
6557
1ae46317 6558 ata_scsi_scan_host(ap, 1);
f3187195
TH
6559 }
6560
6561 return 0;
6562}
6563
f5cda257
TH
6564/**
6565 * ata_host_activate - start host, request IRQ and register it
6566 * @host: target ATA host
6567 * @irq: IRQ to request
6568 * @irq_handler: irq_handler used when requesting IRQ
6569 * @irq_flags: irq_flags used when requesting IRQ
6570 * @sht: scsi_host_template to use when registering the host
6571 *
6572 * After allocating an ATA host and initializing it, most libata
6573 * LLDs perform three steps to activate the host - start host,
6574 * request IRQ and register it. This helper takes necessasry
6575 * arguments and performs the three steps in one go.
6576 *
6577 * LOCKING:
6578 * Inherited from calling layer (may sleep).
6579 *
6580 * RETURNS:
6581 * 0 on success, -errno otherwise.
6582 */
6583int ata_host_activate(struct ata_host *host, int irq,
6584 irq_handler_t irq_handler, unsigned long irq_flags,
6585 struct scsi_host_template *sht)
6586{
6587 int rc;
6588
6589 rc = ata_host_start(host);
6590 if (rc)
6591 return rc;
6592
6593 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6594 dev_driver_string(host->dev), host);
6595 if (rc)
6596 return rc;
6597
4031826b
TH
6598 /* Used to print device info at probe */
6599 host->irq = irq;
6600
f5cda257
TH
6601 rc = ata_host_register(host, sht);
6602 /* if failed, just free the IRQ and leave ports alone */
6603 if (rc)
6604 devm_free_irq(host->dev, irq, host);
6605
6606 return rc;
6607}
6608
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* no EH means there is nothing to quiesce; go straight to
	 * removing the SCSI host
	 */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* stop pending hotplug work before tearing down SCSI */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6663
0529c159
TH
6664/**
6665 * ata_host_detach - Detach all ports of an ATA host
6666 * @host: Host to detach
6667 *
6668 * Detach all ports of @host.
6669 *
6670 * LOCKING:
6671 * Kernel thread context (may sleep).
6672 */
6673void ata_host_detach(struct ata_host *host)
6674{
6675 int i;
6676
6677 for (i = 0; i < host->n_ports; i++)
6678 ata_port_detach(host->ports[i]);
6679}
6680
1da177e4
LT
6681/**
6682 * ata_std_ports - initialize ioaddr with standard port offsets.
6683 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6684 *
6685 * Utility function which initializes data_addr, error_addr,
6686 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6687 * device_addr, status_addr, and command_addr to standard offsets
6688 * relative to cmd_addr.
6689 *
6690 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6691 */
0baab86b 6692
1da177e4
LT
6693void ata_std_ports(struct ata_ioports *ioaddr)
6694{
6695 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6696 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6697 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6698 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6699 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6700 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6701 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6702 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6703 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6704 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6705}
6706
0baab86b 6707
374b1873
JG
6708#ifdef CONFIG_PCI
6709
1da177e4
LT
6710/**
6711 * ata_pci_remove_one - PCI layer callback for device removal
6712 * @pdev: PCI device that was removed
6713 *
b878ca5d
TH
6714 * PCI layer indicates to libata via this hook that hot-unplug or
6715 * module unload event has occurred. Detach all ports. Resource
6716 * release is handled via devres.
1da177e4
LT
6717 *
6718 * LOCKING:
6719 * Inherited from PCI layer (may sleep).
6720 */
f0d36efd 6721void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6722{
6723 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6724 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6725
b878ca5d 6726 ata_host_detach(host);
1da177e4
LT
6727}
6728
6729/* move to PCI subsystem */
057ace5e 6730int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6731{
6732 unsigned long tmp = 0;
6733
6734 switch (bits->width) {
6735 case 1: {
6736 u8 tmp8 = 0;
6737 pci_read_config_byte(pdev, bits->reg, &tmp8);
6738 tmp = tmp8;
6739 break;
6740 }
6741 case 2: {
6742 u16 tmp16 = 0;
6743 pci_read_config_word(pdev, bits->reg, &tmp16);
6744 tmp = tmp16;
6745 break;
6746 }
6747 case 4: {
6748 u32 tmp32 = 0;
6749 pci_read_config_dword(pdev, bits->reg, &tmp32);
6750 tmp = tmp32;
6751 break;
6752 }
6753
6754 default:
6755 return -EINVAL;
6756 }
6757
6758 tmp &= bits->mask;
6759
6760 return (tmp == bits->val) ? 1 : 0;
6761}
9b847548 6762
6ffa01d8 6763#ifdef CONFIG_PM
3c5100c1 6764void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6765{
6766 pci_save_state(pdev);
4c90d971 6767 pci_disable_device(pdev);
500530f6 6768
4c90d971 6769 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6770 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6771}
6772
553c4aa6 6773int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6774{
553c4aa6
TH
6775 int rc;
6776
9b847548
JA
6777 pci_set_power_state(pdev, PCI_D0);
6778 pci_restore_state(pdev);
553c4aa6 6779
b878ca5d 6780 rc = pcim_enable_device(pdev);
553c4aa6
TH
6781 if (rc) {
6782 dev_printk(KERN_ERR, &pdev->dev,
6783 "failed to enable device after resume (%d)\n", rc);
6784 return rc;
6785 }
6786
9b847548 6787 pci_set_master(pdev);
553c4aa6 6788 return 0;
500530f6
TH
6789}
6790
3c5100c1 6791int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6792{
cca3974e 6793 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6794 int rc = 0;
6795
cca3974e 6796 rc = ata_host_suspend(host, mesg);
500530f6
TH
6797 if (rc)
6798 return rc;
6799
3c5100c1 6800 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6801
6802 return 0;
6803}
6804
6805int ata_pci_device_resume(struct pci_dev *pdev)
6806{
cca3974e 6807 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6808 int rc;
500530f6 6809
553c4aa6
TH
6810 rc = ata_pci_device_do_resume(pdev);
6811 if (rc == 0)
6812 ata_host_resume(host);
6813 return rc;
9b847548 6814}
6ffa01d8
TH
6815#endif /* CONFIG_PM */
6816
1da177e4
LT
6817#endif /* CONFIG_PCI */
6818
6819
1da177e4
LT
6820static int __init ata_init(void)
6821{
a8601e5f 6822 ata_probe_timeout *= HZ;
1da177e4
LT
6823 ata_wq = create_workqueue("ata");
6824 if (!ata_wq)
6825 return -ENOMEM;
6826
453b07ac
TH
6827 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6828 if (!ata_aux_wq) {
6829 destroy_workqueue(ata_wq);
6830 return -ENOMEM;
6831 }
6832
1da177e4
LT
6833 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6834 return 0;
6835}
6836
/* module unload: tear down the workqueues created in ata_init() */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6842
a4625085 6843subsys_initcall(ata_init);
1da177e4
LT
6844module_exit(ata_exit);
6845
67846b30 6846static unsigned long ratelimit_time;
34af946a 6847static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6848
6849int ata_ratelimit(void)
6850{
6851 int rc;
6852 unsigned long flags;
6853
6854 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6855
6856 if (time_after(jiffies, ratelimit_time)) {
6857 rc = 1;
6858 ratelimit_time = jiffies + (HZ/5);
6859 } else
6860 rc = 0;
6861
6862 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6863
6864 return rc;
6865}
6866
c22daff4
TH
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	/* keep polling while the masked value still equals @val */
	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
6912
dd5b06c4
TH
/*
 * Dummy port_ops
 *
 * Minimal no-op callbacks used by ata_dummy_port_ops below for ports
 * that exist structurally but have no usable hardware behind them.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report "device ready" so status polls terminate immediately */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* refuse every queued command with a system error */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6929
/* port_ops for dummy ports: every callback is a no-op and every
 * issued command fails with AC_ERR_SYSTEM (see ata_dummy_qc_issue)
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable = ata_port_disable,
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* ready-made port_info for LLDs that need to declare a dummy port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
6949
1da177e4
LT
6950/*
6951 * libata is essentially a library of internal helper functions for
6952 * low-level ATA host controller drivers. As such, the API/ABI is
6953 * likely to change as new drivers are added and updated.
6954 * Do not depend on ABI/API stability.
6955 */
6956
e9c83914
TH
6957EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6958EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6959EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6960EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6961EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
6962EXPORT_SYMBOL_GPL(ata_std_bios_param);
6963EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6964EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6965EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6966EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6967EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6968EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6969EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6970EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6971EXPORT_SYMBOL_GPL(ata_sg_init);
6972EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6973EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6974EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6975EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6976EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6977EXPORT_SYMBOL_GPL(ata_tf_load);
6978EXPORT_SYMBOL_GPL(ata_tf_read);
6979EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6980EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 6981EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
6982EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6983EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6984EXPORT_SYMBOL_GPL(ata_check_status);
6985EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6986EXPORT_SYMBOL_GPL(ata_exec_command);
6987EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 6988EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 6989EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 6990EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
6991EXPORT_SYMBOL_GPL(ata_data_xfer);
6992EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6993EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 6994EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 6995EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6996EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6997EXPORT_SYMBOL_GPL(ata_bmdma_start);
6998EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6999EXPORT_SYMBOL_GPL(ata_bmdma_status);
7000EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7001EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7002EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7003EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7004EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7005EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7006EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7007EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7008EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7009EXPORT_SYMBOL_GPL(sata_link_debounce);
7010EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4
LT
7011EXPORT_SYMBOL_GPL(sata_phy_reset);
7012EXPORT_SYMBOL_GPL(__sata_phy_reset);
7013EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7014EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7015EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7016EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7017EXPORT_SYMBOL_GPL(sata_std_hardreset);
7018EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7019EXPORT_SYMBOL_GPL(ata_dev_classify);
7020EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7021EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7022EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7023EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7024EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 7025EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7026EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7027EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7028EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7029EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7030EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7031EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7032EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7033EXPORT_SYMBOL_GPL(sata_scr_valid);
7034EXPORT_SYMBOL_GPL(sata_scr_read);
7035EXPORT_SYMBOL_GPL(sata_scr_write);
7036EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7037EXPORT_SYMBOL_GPL(ata_link_online);
7038EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7039#ifdef CONFIG_PM
cca3974e
JG
7040EXPORT_SYMBOL_GPL(ata_host_suspend);
7041EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7042#endif /* CONFIG_PM */
6a62a04d
TH
7043EXPORT_SYMBOL_GPL(ata_id_string);
7044EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 7045EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
1da177e4
LT
7046EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7047
1bc4ccff 7048EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
7049EXPORT_SYMBOL_GPL(ata_timing_compute);
7050EXPORT_SYMBOL_GPL(ata_timing_merge);
7051
1da177e4
LT
7052#ifdef CONFIG_PCI
7053EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7054EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7055EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7056EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7057EXPORT_SYMBOL_GPL(ata_pci_init_one);
7058EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7059#ifdef CONFIG_PM
500530f6
TH
7060EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7061EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7062EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7063EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7064#endif /* CONFIG_PM */
67951ade
AC
7065EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7066EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7067#endif /* CONFIG_PCI */
9b847548 7068
b64bbc39
TH
7069EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7070EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7071EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
ece1d636 7072EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 7073EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7074EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7075EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
7076EXPORT_SYMBOL_GPL(ata_port_freeze);
7077EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7078EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7079EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7080EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7081EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
7082EXPORT_SYMBOL_GPL(ata_irq_on);
7083EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
7084EXPORT_SYMBOL_GPL(ata_irq_ack);
7085EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 7086EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7087
7088EXPORT_SYMBOL_GPL(ata_cable_40wire);
7089EXPORT_SYMBOL_GPL(ata_cable_80wire);
7090EXPORT_SYMBOL_GPL(ata_cable_unknown);
7091EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 0.934006 seconds and 5 git commands to generate.