libata: correct kernel parameter in documentation.
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
a8601e5f
AM
100static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101module_param(ata_probe_timeout, int, 0444);
102MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
103
d7d0dad6
JG
104int libata_noacpi = 1;
105module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
106MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
107
1da177e4
LT
108MODULE_AUTHOR("Jeff Garzik");
109MODULE_DESCRIPTION("Library module for ATA devices");
110MODULE_LICENSE("GPL");
111MODULE_VERSION(DRV_VERSION);
112
0baab86b 113
1da177e4
LT
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number*/
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* high-order bytes (LBA48 / extended commands) */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* bytes 16-19 are reserved in the H2D register FIS */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
157
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	/* In a D2H FIS the command/feature slots carry status/error */
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	/* high-order address bytes (LBA48) */
	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
186
/* Read/write command lookup for ata_rwcmd_protocol().  Indexed by
 * group base (0 = pio multi, 8 = pio, 16 = dma) + fua*4 + lba48*2 + write.
 * A zero entry means that combination is invalid (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
216
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* offsets into an 8-entry group of ata_rw_cmds[] */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		/* group 0 = pio multi, group 8 = plain pio */
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;	/* dma group */
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	/* zero table entry: no command exists for this flag combination */
	return -1;
}
257
35b649fe
TH
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* bits 47:24 come from the HOB registers */
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		/* CHS: reconstruct the linear block number */
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
300
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag occupies bits 7:3 of the sector count register;
		 * the transfer length goes in the feature registers instead.
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
/* Maps each contiguous bit range of an xfer_mask (PIO, MWDMA, UDMA) to
 * its XFER_* base mode.  Terminated by a sentinel entry with shift == -1.
 * Consumed by the ata_xfer_* translation helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit range within the packed xfer_mask */
	u8 base;		/* XFER_* value of the range's lowest bit */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* Indexed by bit position within the packed xfer_mask; the order
	 * therefore mirrors ata_xfer_tbl (PIO, then MWDMA, then UDMA).
	 */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	/* only the highest set bit determines the reported mode */
	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
594
/* Map a SATA link speed number (1-based, from SStatus SPD) to a
 * human-readable string; out-of-range values yield "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd >= 1 && spd <= ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];

	return "<unknown>";
}
606
/* Disable @dev: drop its transfer mode to PIO0 and bump dev->class
 * (moving it out of the "enabled" class range).  No-op if the device
 * is not currently enabled.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		/* force slowest mode quietly before retiring the device */
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
617
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns; only the last pair is read back */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
660
1da177e4
LT
661/**
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
664 *
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
668 *
669 * LOCKING:
670 * None.
671 *
672 * RETURNS:
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
674 * the event of failure.
675 */
676
057ace5e 677unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
678{
679 /* Apple's open source Darwin code hints that some devices only
680 * put a proper signature into the LBA mid/high registers,
681 * So, we only check those. It's sufficient for uniqueness.
682 */
683
684 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
685 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
686 DPRINTK("found ATA device by sig\n");
687 return ATA_DEV_ATA;
688 }
689
690 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
691 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
692 DPRINTK("found ATAPI device by sig\n");
693 return ATA_DEV_ATAPI;
694 }
695
696 DPRINTK("unknown device\n");
697 return ATA_DEV_UNKNOWN;
698}
699
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature slot holds the error register here */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		/* any other error code: treat as no device */
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* status reads all-zero: nothing is really there */
		class = ATA_DEV_NONE;

	return class;
}
768
769/**
6a62a04d 770 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
771 * @id: IDENTIFY DEVICE results we will examine
772 * @s: string into which data is output
773 * @ofs: offset into identify device page
774 * @len: length of string to return. must be an even number.
775 *
776 * The strings in the IDENTIFY DEVICE page are broken up into
777 * 16-bit chunks. Run through the string, and output each
778 * 8-bit chunk linearly, regardless of platform.
779 *
780 * LOCKING:
781 * caller.
782 */
783
6a62a04d
TH
784void ata_id_string(const u16 *id, unsigned char *s,
785 unsigned int ofs, unsigned int len)
1da177e4
LT
786{
787 unsigned int c;
788
789 while (len > 0) {
790 c = id[ofs] >> 8;
791 *s = c;
792 s++;
793
794 c = id[ofs] & 0xff;
795 *s = c;
796 s++;
797
798 ofs++;
799 len -= 2;
800 }
801}
802
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* contract check: @len must be odd (even payload + NUL) */
	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	/* walk back over trailing spaces, then NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
0baab86b 831
db6f8759
TH
832static u64 ata_id_n_sectors(const u16 *id)
833{
834 if (ata_id_has_lba(id)) {
835 if (ata_id_has_lba48(id))
836 return ata_id_u64(id, 100);
837 else
838 return ata_id_u32(id, 60);
839 } else {
840 if (ata_id_current_chs_valid(id))
841 return ata_id_u32(id, 57);
842 else
843 return id[1] * id[3] * id[6];
844 }
845}
846
1e999736
AC
847static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
848{
849 u64 sectors = 0;
850
851 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
852 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
853 sectors |= (tf->hob_lbal & 0xff) << 24;
854 sectors |= (tf->lbah & 0xff) << 16;
855 sectors |= (tf->lbam & 0xff) << 8;
856 sectors |= (tf->lbal & 0xff);
857
858 return ++sectors;
859}
860
861static u64 ata_tf_to_lba(struct ata_taskfile *tf)
862{
863 u64 sectors = 0;
864
865 sectors |= (tf->device & 0x0f) << 24;
866 sectors |= (tf->lbah & 0xff) << 16;
867 sectors |= (tf->lbam & 0xff) << 8;
868 sectors |= (tf->lbal & 0xff);
869
870 return ++sectors;
871}
872
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* device-aborted command is reported separately so the
		 * caller can decide to continue without HPA handling */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* the returned taskfile holds the last addressable LBA */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);

	return 0;
}
921
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the last addressable LBA, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 go into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ABORTED or IDNF from the device means access denied */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
978
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  (either no hidden area, or unlocking disabled) */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1074
10305f0f
A
1075/**
1076 * ata_id_to_dma_mode - Identify DMA mode from id block
1077 * @dev: device to identify
cc261267 1078 * @unknown: mode to assume if we cannot tell
10305f0f
A
1079 *
1080 * Set up the timing values for the device based upon the identify
1081 * reported values for the DMA mode. This function is used by drivers
1082 * which rely upon firmware configured modes, but wish to report the
1083 * mode correctly when possible.
1084 *
1085 * In addition we emit similarly formatted messages to the default
1086 * ata_dev_set_mode handler, in order to provide consistency of
1087 * presentation.
1088 */
1089
1090void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1091{
1092 unsigned int mask;
1093 u8 mode;
1094
1095 /* Pack the DMA modes */
1096 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1097 if (dev->id[53] & 0x04)
1098 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1099
1100 /* Select the mode in use */
1101 mode = ata_xfer_mask2mode(mask);
1102
1103 if (mode != 0) {
1104 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1105 ata_mode_string(mask));
1106 } else {
1107 /* SWDMA perhaps ? */
1108 mode = unknown;
1109 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1110 }
1111
1112 /* Configure the device reporting */
1113 dev->xfer_mode = mode;
1114 dev->xfer_shift = ata_xfer_mode2shift(mode);
1115}
1116
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: for hosts where device selection is
	 * unnecessary or handled entirely by the controller
	 */
}
1132
0baab86b 1133
1da177e4
LT
1134/**
1135 * ata_std_dev_select - Select device 0/1 on ATA bus
1136 * @ap: ATA channel to manipulate
1137 * @device: ATA device (numbered from zero) to select
1138 *
1139 * Use the method defined in the ATA specification to
1140 * make either device 0, or device 1, active on the
0baab86b
EF
1141 * ATA channel. Works with both PIO and MMIO.
1142 *
1143 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1144 *
1145 * LOCKING:
1146 * caller.
1147 */
1148
1149void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1150{
1151 u8 tmp;
1152
1153 if (device == 0)
1154 tmp = ATA_DEVICE_OBS;
1155 else
1156 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1157
0d5ff566 1158 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1159 ata_pause(ap); /* needed; also flushes, for mmio */
1160}
1161
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* make sure the previous command has finished before switching */
	if (wait)
		ata_wait_idle(ap);

	/* delegate the actual register write to the LLD's dev_select */
	ap->ops->dev_select(ap, device);

	if (wait) {
		/* give ATAPI devices extra settle time after selection,
		 * but only when the calling context may sleep
		 */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
1199
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* capability / transfer-mode related words; see the ATA spec
	 * for the meaning of each word number
	 */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1238
cb95d562
TH
1239/**
1240 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1241 * @id: IDENTIFY data to compute xfer mask from
1242 *
1243 * Compute the xfermask for this device. This is not as trivial
1244 * as it seems if we must consider early devices correctly.
1245 *
1246 * FIXME: pre IDE drive timing (do we care ?).
1247 *
1248 * LOCKING:
1249 * None.
1250 *
1251 * RETURNS:
1252 * Computed xfermask
1253 */
1254static unsigned int ata_id_xfermask(const u16 *id)
1255{
1256 unsigned int pio_mask, mwdma_mask, udma_mask;
1257
1258 /* Usual case. Word 53 indicates word 64 is valid */
1259 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1260 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1261 pio_mask <<= 3;
1262 pio_mask |= 0x7;
1263 } else {
1264 /* If word 64 isn't valid then Word 51 high byte holds
1265 * the PIO timing number for the maximum. Turn it into
1266 * a mask.
1267 */
7a0f1c8a 1268 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1269 if (mode < 5) /* Valid PIO range */
1270 pio_mask = (2 << mode) - 1;
1271 else
1272 pio_mask = 1;
cb95d562
TH
1273
1274 /* But wait.. there's more. Design your standards by
1275 * committee and you too can get a free iordy field to
1276 * process. However its the speeds not the modes that
1277 * are supported... Note drivers using the timing API
1278 * will get this right anyway
1279 */
1280 }
1281
1282 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1283
b352e57d
AC
1284 if (ata_id_is_cfa(id)) {
1285 /*
1286 * Process compact flash extended modes
1287 */
1288 int pio = id[163] & 0x7;
1289 int dma = (id[163] >> 3) & 7;
1290
1291 if (pio)
1292 pio_mask |= (1 << 5);
1293 if (pio > 1)
1294 pio_mask |= (1 << 6);
1295 if (dma)
1296 mwdma_mask |= (1 << 3);
1297 if (dma > 1)
1298 mwdma_mask |= (1 << 4);
1299 }
1300
fb21f0d0
TH
1301 udma_mask = 0;
1302 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1303 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1304
1305 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1306}
1307
86e45b6b
TH
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* rebind the port's delayed work item to @fn; @data travels
	 * out-of-band through ap->port_task_data
	 */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1336
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancel a pending port_task and wait out a running one,
	 * including one that keeps re-arming itself
	 */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1356
7102d230 1357static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1358{
77853bf2 1359 struct completion *waiting = qc->private_data;
a2a7a662 1360
a2a7a662 1361 complete(waiting);
a2a7a662
TH
1362}
1363
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the chosen tag must be free; anything else is a libata bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the port/link in-flight command state so the
	 * internal qc has the controller to itself; restored below
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		/* total byte count is the sum of all sg entry lengths */
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* ata_qc_complete_internal() signals @wait; rc == 0 on timeout */
	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		/* AC_ERR_OTHER is a catch-all: set it when nothing more
		 * specific is known, drop it when something is
		 */
		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the preempted command state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1540
2432697b 1541/**
33480a0e 1542 * ata_exec_internal - execute libata internal command
2432697b
TH
1543 * @dev: Device to which the command is sent
1544 * @tf: Taskfile registers for the command and the result
1545 * @cdb: CDB for packet command
1546 * @dma_dir: Data tranfer direction of the command
1547 * @buf: Data buffer of the command
1548 * @buflen: Length of data buffer
1549 *
1550 * Wrapper around ata_exec_internal_sg() which takes simple
1551 * buffer instead of sg list.
1552 *
1553 * LOCKING:
1554 * None. Should be called with kernel context, might sleep.
1555 *
1556 * RETURNS:
1557 * Zero on success, AC_ERR_* mask on failure
1558 */
1559unsigned ata_exec_internal(struct ata_device *dev,
1560 struct ata_taskfile *tf, const u8 *cdb,
1561 int dma_dir, void *buf, unsigned int buflen)
1562{
33480a0e
TH
1563 struct scatterlist *psg = NULL, sg;
1564 unsigned int n_elem = 0;
2432697b 1565
33480a0e
TH
1566 if (dma_dir != DMA_NONE) {
1567 WARN_ON(!buf);
1568 sg_init_one(&sg, buf, buflen);
1569 psg = &sg;
1570 n_elem++;
1571 }
2432697b 1572
33480a0e 1573 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1574}
1575
977e6b9f
TH
1576/**
1577 * ata_do_simple_cmd - execute simple internal command
1578 * @dev: Device to which the command is sent
1579 * @cmd: Opcode to execute
1580 *
1581 * Execute a 'simple' command, that only consists of the opcode
1582 * 'cmd' itself, without filling any other registers
1583 *
1584 * LOCKING:
1585 * Kernel thread context (may sleep).
1586 *
1587 * RETURNS:
1588 * Zero on success, AC_ERR_* mask on failure
e58eb583 1589 */
77b08fb5 1590unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1591{
1592 struct ata_taskfile tf;
e58eb583
TH
1593
1594 ata_tf_init(dev, &tf);
1595
1596 tf.command = cmd;
1597 tf.flags |= ATA_TFLAG_DEVICE;
1598 tf.protocol = ATA_PROT_NODATA;
1599
977e6b9f 1600 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1601}
1602
1bc4ccff
AC
1603/**
1604 * ata_pio_need_iordy - check if iordy needed
1605 * @adev: ATA device
1606 *
1607 * Check if the current speed of the device requires IORDY. Used
1608 * by various controllers for chip configuration.
1609 */
a617c09f 1610
1bc4ccff
AC
1611unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1612{
432729f0
AC
1613 /* Controller doesn't support IORDY. Probably a pointless check
1614 as the caller should know this */
9af5c9c9 1615 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1616 return 0;
432729f0
AC
1617 /* PIO3 and higher it is mandatory */
1618 if (adev->pio_mode > XFER_PIO_2)
1619 return 1;
1620 /* We turn it on when possible */
1621 if (ata_id_has_iordy(adev->id))
1bc4ccff 1622 return 1;
432729f0
AC
1623 return 0;
1624}
2e9edbf8 1625
432729f0
AC
1626/**
1627 * ata_pio_mask_no_iordy - Return the non IORDY mask
1628 * @adev: ATA device
1629 *
1630 * Compute the highest mode possible if we are not using iordy. Return
1631 * -1 if no iordy mode is available.
1632 */
a617c09f 1633
432729f0
AC
1634static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1635{
1bc4ccff 1636 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1637 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1638 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1639 /* Is the speed faster than the drive allows non IORDY ? */
1640 if (pio) {
1641 /* This is cycle times not frequency - watch the logic! */
1642 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1643 return 3 << ATA_SHIFT_PIO;
1644 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1645 }
1646 }
432729f0 1647 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1648}
1649
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (possibly guessed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY words arrive little-endian; fix up for the host */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2 values 0x37c8/0x738c appear to be the ATA "specific
	 * configuration" codes for drives needing spin-up — TODO confirm
	 * against the spec
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1826
3373efd8 1827static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1828{
9af5c9c9
TH
1829 struct ata_port *ap = dev->link->ap;
1830 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1831}
1832
a6e6ce8e
TH
1833static void ata_dev_config_ncq(struct ata_device *dev,
1834 char *desc, size_t desc_sz)
1835{
9af5c9c9 1836 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1837 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1838
1839 if (!ata_id_has_ncq(dev->id)) {
1840 desc[0] = '\0';
1841 return;
1842 }
75683fe7 1843 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1844 snprintf(desc, desc_sz, "NCQ (not used)");
1845 return;
1846 }
a6e6ce8e 1847 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1848 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1849 dev->flags |= ATA_DFLAG_NCQ;
1850 }
1851
1852 if (hdepth >= ddepth)
1853 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1854 else
1855 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1856}
1857
49016aca 1858/**
ffeae418 1859 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1860 * @dev: Target device to configure
1861 *
1862 * Configure @dev according to @dev->id. Generic and low-level
1863 * driver specific fixups are also applied.
49016aca
TH
1864 *
1865 * LOCKING:
ffeae418
TH
1866 * Kernel thread context (may sleep)
1867 *
1868 * RETURNS:
1869 * 0 on success, -errno otherwise
49016aca 1870 */
efdaedc4 1871int ata_dev_configure(struct ata_device *dev)
49016aca 1872{
9af5c9c9
TH
1873 struct ata_port *ap = dev->link->ap;
1874 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1875 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1876 const u16 *id = dev->id;
ff8854b2 1877 unsigned int xfer_mask;
b352e57d 1878 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1879 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1880 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1881 int rc;
49016aca 1882
0dd4b21f 1883 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1884 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1885 __FUNCTION__);
ffeae418 1886 return 0;
49016aca
TH
1887 }
1888
0dd4b21f 1889 if (ata_msg_probe(ap))
44877b4e 1890 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1891
75683fe7
TH
1892 /* set horkage */
1893 dev->horkage |= ata_dev_blacklisted(dev);
1894
6746544c
TH
1895 /* let ACPI work its magic */
1896 rc = ata_acpi_on_devcfg(dev);
1897 if (rc)
1898 return rc;
08573a86 1899
05027adc
TH
1900 /* massage HPA, do it early as it might change IDENTIFY data */
1901 rc = ata_hpa_resize(dev);
1902 if (rc)
1903 return rc;
1904
c39f5ebe 1905 /* print device capabilities */
0dd4b21f 1906 if (ata_msg_probe(ap))
88574551
TH
1907 ata_dev_printk(dev, KERN_DEBUG,
1908 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1909 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1910 __FUNCTION__,
f15a1daf
TH
1911 id[49], id[82], id[83], id[84],
1912 id[85], id[86], id[87], id[88]);
c39f5ebe 1913
208a9933 1914 /* initialize to-be-configured parameters */
ea1dd4e1 1915 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1916 dev->max_sectors = 0;
1917 dev->cdb_len = 0;
1918 dev->n_sectors = 0;
1919 dev->cylinders = 0;
1920 dev->heads = 0;
1921 dev->sectors = 0;
1922
1da177e4
LT
1923 /*
1924 * common ATA, ATAPI feature tests
1925 */
1926
ff8854b2 1927 /* find max transfer mode; for printk only */
1148c3a7 1928 xfer_mask = ata_id_xfermask(id);
1da177e4 1929
0dd4b21f
BP
1930 if (ata_msg_probe(ap))
1931 ata_dump_id(id);
1da177e4 1932
ef143d57
AL
1933 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1934 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1935 sizeof(fwrevbuf));
1936
1937 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1938 sizeof(modelbuf));
1939
1da177e4
LT
1940 /* ATA-specific feature tests */
1941 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1942 if (ata_id_is_cfa(id)) {
1943 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1944 ata_dev_printk(dev, KERN_WARNING,
1945 "supports DRM functions and may "
1946 "not be fully accessable.\n");
b352e57d
AC
1947 snprintf(revbuf, 7, "CFA");
1948 }
1949 else
1950 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1951
1148c3a7 1952 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1953
3f64f565
EM
1954 if (dev->id[59] & 0x100)
1955 dev->multi_count = dev->id[59] & 0xff;
1956
1148c3a7 1957 if (ata_id_has_lba(id)) {
4c2d721a 1958 const char *lba_desc;
a6e6ce8e 1959 char ncq_desc[20];
8bf62ece 1960
4c2d721a
TH
1961 lba_desc = "LBA";
1962 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1963 if (ata_id_has_lba48(id)) {
8bf62ece 1964 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1965 lba_desc = "LBA48";
6fc49adb
TH
1966
1967 if (dev->n_sectors >= (1UL << 28) &&
1968 ata_id_has_flush_ext(id))
1969 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1970 }
8bf62ece 1971
a6e6ce8e
TH
1972 /* config NCQ */
1973 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1974
8bf62ece 1975 /* print device info to dmesg */
3f64f565
EM
1976 if (ata_msg_drv(ap) && print_info) {
1977 ata_dev_printk(dev, KERN_INFO,
1978 "%s: %s, %s, max %s\n",
1979 revbuf, modelbuf, fwrevbuf,
1980 ata_mode_string(xfer_mask));
1981 ata_dev_printk(dev, KERN_INFO,
1982 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1983 (unsigned long long)dev->n_sectors,
3f64f565
EM
1984 dev->multi_count, lba_desc, ncq_desc);
1985 }
ffeae418 1986 } else {
8bf62ece
AL
1987 /* CHS */
1988
1989 /* Default translation */
1148c3a7
TH
1990 dev->cylinders = id[1];
1991 dev->heads = id[3];
1992 dev->sectors = id[6];
8bf62ece 1993
1148c3a7 1994 if (ata_id_current_chs_valid(id)) {
8bf62ece 1995 /* Current CHS translation is valid. */
1148c3a7
TH
1996 dev->cylinders = id[54];
1997 dev->heads = id[55];
1998 dev->sectors = id[56];
8bf62ece
AL
1999 }
2000
2001 /* print device info to dmesg */
3f64f565 2002 if (ata_msg_drv(ap) && print_info) {
88574551 2003 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2004 "%s: %s, %s, max %s\n",
2005 revbuf, modelbuf, fwrevbuf,
2006 ata_mode_string(xfer_mask));
a84471fe 2007 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2008 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2009 (unsigned long long)dev->n_sectors,
2010 dev->multi_count, dev->cylinders,
2011 dev->heads, dev->sectors);
2012 }
07f6f7d0
AL
2013 }
2014
6e7846e9 2015 dev->cdb_len = 16;
1da177e4
LT
2016 }
2017
2018 /* ATAPI-specific feature tests */
2c13b7ce 2019 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2020 const char *cdb_intr_string = "";
2021 const char *atapi_an_string = "";
7d77b247 2022 u32 sntf;
08a556db 2023
1148c3a7 2024 rc = atapi_cdb_len(id);
1da177e4 2025 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2026 if (ata_msg_warn(ap))
88574551
TH
2027 ata_dev_printk(dev, KERN_WARNING,
2028 "unsupported CDB len\n");
ffeae418 2029 rc = -EINVAL;
1da177e4
LT
2030 goto err_out_nosup;
2031 }
6e7846e9 2032 dev->cdb_len = (unsigned int) rc;
1da177e4 2033
7d77b247
TH
2034 /* Enable ATAPI AN if both the host and device have
2035 * the support. If PMP is attached, SNTF is required
2036 * to enable ATAPI AN to discern between PHY status
2037 * changed notifications and ATAPI ANs.
9f45cbd3 2038 */
7d77b247
TH
2039 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2040 (!ap->nr_pmp_links ||
2041 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2042 unsigned int err_mask;
2043
9f45cbd3 2044 /* issue SET feature command to turn this on */
854c73a2
TH
2045 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2046 if (err_mask)
9f45cbd3 2047 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2048 "failed to enable ATAPI AN "
2049 "(err_mask=0x%x)\n", err_mask);
2050 else {
9f45cbd3 2051 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2052 atapi_an_string = ", ATAPI AN";
2053 }
9f45cbd3
KCA
2054 }
2055
08a556db 2056 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2057 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2058 cdb_intr_string = ", CDB intr";
2059 }
312f7da2 2060
1da177e4 2061 /* print device info to dmesg */
5afc8142 2062 if (ata_msg_drv(ap) && print_info)
ef143d57 2063 ata_dev_printk(dev, KERN_INFO,
854c73a2 2064 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2065 modelbuf, fwrevbuf,
12436c30 2066 ata_mode_string(xfer_mask),
854c73a2 2067 cdb_intr_string, atapi_an_string);
1da177e4
LT
2068 }
2069
914ed354
TH
2070 /* determine max_sectors */
2071 dev->max_sectors = ATA_MAX_SECTORS;
2072 if (dev->flags & ATA_DFLAG_LBA48)
2073 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2074
93590859
AC
2075 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2076 /* Let the user know. We don't want to disallow opens for
2077 rescue purposes, or in case the vendor is just a blithering
2078 idiot */
2079 if (print_info) {
2080 ata_dev_printk(dev, KERN_WARNING,
2081"Drive reports diagnostics failure. This may indicate a drive\n");
2082 ata_dev_printk(dev, KERN_WARNING,
2083"fault or invalid emulation. Contact drive vendor for information.\n");
2084 }
2085 }
2086
4b2f3ede 2087 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2088 if (ata_dev_knobble(dev)) {
5afc8142 2089 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2090 ata_dev_printk(dev, KERN_INFO,
2091 "applying bridge limits\n");
5a529139 2092 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2093 dev->max_sectors = ATA_MAX_SECTORS;
2094 }
2095
75683fe7 2096 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2097 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2098 dev->max_sectors);
18d6e9d5 2099
4b2f3ede 2100 if (ap->ops->dev_config)
cd0d3bbc 2101 ap->ops->dev_config(dev);
4b2f3ede 2102
0dd4b21f
BP
2103 if (ata_msg_probe(ap))
2104 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2105 __FUNCTION__, ata_chk_status(ap));
ffeae418 2106 return 0;
1da177e4
LT
2107
2108err_out_nosup:
0dd4b21f 2109 if (ata_msg_probe(ap))
88574551
TH
2110 ata_dev_printk(dev, KERN_DEBUG,
2111 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2112 return rc;
1da177e4
LT
2113}
2114
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2127
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2140
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2152
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2164
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Each device gets ATA_PROBE_MAX_TRIES attempts; on
 *	repeated failure the link speed and transfer mode are lowered
 *	before retrying, and the device is disabled once its tries are
 *	exhausted.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* every device starts with a full allowance of probe attempts */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot the classes reported by reset, then clear dev->class so
	 * ata_dev_read_id() below starts from a known state */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* PRINTINFO makes ata_dev_configure() log device details */
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose step failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough - apply speed-down on the last chance too */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2300
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED is all it takes to re-enable the port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2316
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *	Nothing is printed if the SStatus SCR cannot be read.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	/* SControl is best-effort; failure leaves scontrol unchanged */
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		/* SStatus bits 7:4 hold the current link speed code */
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
2345
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is marked
 *	probed and @ap->cbl is set to ATA_CBL_SATA; on failure the
 *	port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5 s PHY-ready budget */
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (SControl DET = 1) */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset (SControl DET = 0) */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; SStatus DET == 1
	 * means a device is sensed but communication isn't up yet */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to drop BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2401
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.  The bus probe is skipped if the
 *	PHY reset left the port disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2420
ebdfca6e
AC
2421/**
2422 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2423 * @adev: device
2424 *
2425 * Obtain the other device on the same cable, or if none is
2426 * present NULL is returned
2427 */
2e9edbf8 2428
3373efd8 2429struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2430{
9af5c9c9
TH
2431 struct ata_link *link = adev->link;
2432 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2433 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2434 return NULL;
2435 return pair;
2436}
2437
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* clear both device slots and flag the whole port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2457
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;	/* SStatus bits 7:4 = SPD */
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* mask == 1 means we're already limited to 1.5Gbps; can't go lower */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2516
936fd732 2517static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2518{
2519 u32 spd, limit;
2520
936fd732 2521 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2522 limit = 0;
2523 else
936fd732 2524 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2525
2526 spd = (*scontrol >> 4) & 0xf;
2527 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2528
2529 return spd != limit;
2530}
2531
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SControl can't be read, assume no reconfiguration needed */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(link, &scontrol);
}
2556
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() updates scontrol in place */
	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
2586
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
/*	  mode		setup act8b rec8b cyc8b active recover cycle  udma */

	{ XFER_UDMA_6,	   0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,	   0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,	   0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,	   0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,	   0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,	   0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,	   0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,	  10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,	  15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,	  25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,	  30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,	  30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,	  50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,	  70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	/* 0xFF terminates the table (see ata_timing_find_mode()) */
	{ 0xFF }
};
2635
/* ENOUGH(): nanoseconds -> clock ticks, rounding up.
 * EZ(): same, but a zero input (field unused) stays zero. */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Quantize timing @t (nanoseconds, scaled by 1000 for precision) into
 * bus clock counts in @q using cycle time @T, and @UT for the UDMA
 * field.  @t and @q may alias. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2650
2651void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2652 struct ata_timing *m, unsigned int what)
2653{
2654 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2655 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2656 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2657 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2658 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2659 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2660 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2661 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2662}
2663
2664static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2665{
2666 const struct ata_timing *t;
2667
2668 for (t = ata_timing; t->mode != speed; t++)
91190758 2669 if (t->mode == 0xFF)
452503f9 2670 return NULL;
2e9edbf8 2671 return t;
452503f9
AC
2672}
2673
/* Compute the timing parameters @t for device @adev at transfer mode
 * @speed, given bus clock cycle time @T and UDMA cycle time @UT (ns).
 * Starts from the standard table entry, stretches it per the drive's
 * EIDE-reported minimum cycle times, quantizes to clock counts and
 * fixes up active/recovery so they add up to the cycle time.
 * Returns 0 on success, -EINVAL if @speed has no table entry. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table vs drive-reported cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse for the device's current PIO mode and merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2744
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector, optionally ORed with ATA_DNXFER_QUIET
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* QUIET is a modifier flag carried in @sel; strip it off */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the highest DMA mode, UDMA first; fail if
		 * that would leave no mode in the affected class */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* keep only UDMA modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough - FORCE_PIO0 also drops all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode and actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2833
/* Issue SET FEATURES - XFER MODE to @dev for its already-computed
 * xfer_mode/xfer_shift, filtering out known-bogus device errors, then
 * revalidate the device.  Returns 0 on success, -EIO if the device
 * rejected the command, or the revalidation error. */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the chosen transfer shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* POST_SETMODE tells the revalidation path why it is running */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2872
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out paramter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the fastest allowed PIO and DMA modes separately */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* a device with no usable PIO mode is a hard error */
		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2967
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out paramter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;

	/* has private set_mode? Otherwise fall back to the generic path. */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(link, r_failed_dev);
	return ata_do_set_mode(link, r_failed_dev);
}
2992
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.  Registers are loaded first, then the command
 *	is executed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3012
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  After @tmout_pat a "slow to respond"
 *	warning is printed and polling continues until @tmout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* 0xff status means the port is empty/gone - stop polling early */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* keep polling until the overall timeout expires */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3067
d4b2bab4
TH
3068/**
3069 * ata_wait_ready - sleep until BSY clears, or timeout
3070 * @ap: port containing status register to be polled
3071 * @deadline: deadline jiffies for the operation
3072 *
3073 * Sleep until ATA Status register bit BSY clears, or timeout
3074 * occurs.
3075 *
3076 * LOCKING:
3077 * Kernel thread context (may sleep).
3078 *
3079 * RETURNS:
3080 * 0 on success, -errno otherwise.
3081 */
3082int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3083{
3084 unsigned long start = jiffies;
3085 int warned = 0;
3086
3087 while (1) {
3088 u8 status = ata_chk_status(ap);
3089 unsigned long now = jiffies;
3090
3091 if (!(status & ATA_BUSY))
3092 return 0;
936fd732 3093 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3094 return -ENODEV;
3095 if (time_after(now, deadline))
3096 return -EBUSY;
3097
3098 if (!warned && time_after(now, start + 5 * HZ) &&
3099 (deadline - now > 3 * HZ)) {
3100 ata_port_printk(ap, KERN_WARNING,
3101 "port is slow to respond, please be patient "
3102 "(Status 0x%x)\n", status);
3103 warned = 1;
3104 }
3105
3106 msleep(50);
3107 }
3108}
3109
3110static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3111 unsigned long deadline)
1da177e4
LT
3112{
3113 struct ata_ioports *ioaddr = &ap->ioaddr;
3114 unsigned int dev0 = devmask & (1 << 0);
3115 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3116 int rc, ret = 0;
1da177e4
LT
3117
3118 /* if device 0 was found in ata_devchk, wait for its
3119 * BSY bit to clear
3120 */
d4b2bab4
TH
3121 if (dev0) {
3122 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3123 if (rc) {
3124 if (rc != -ENODEV)
3125 return rc;
3126 ret = rc;
3127 }
d4b2bab4 3128 }
1da177e4 3129
e141d999
TH
3130 /* if device 1 was found in ata_devchk, wait for register
3131 * access briefly, then wait for BSY to clear.
1da177e4 3132 */
e141d999
TH
3133 if (dev1) {
3134 int i;
1da177e4
LT
3135
3136 ap->ops->dev_select(ap, 1);
e141d999
TH
3137
3138 /* Wait for register access. Some ATAPI devices fail
3139 * to set nsect/lbal after reset, so don't waste too
3140 * much time on it. We're gonna wait for !BSY anyway.
3141 */
3142 for (i = 0; i < 2; i++) {
3143 u8 nsect, lbal;
3144
3145 nsect = ioread8(ioaddr->nsect_addr);
3146 lbal = ioread8(ioaddr->lbal_addr);
3147 if ((nsect == 1) && (lbal == 1))
3148 break;
3149 msleep(50); /* give drive a breather */
3150 }
3151
d4b2bab4 3152 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3153 if (rc) {
3154 if (rc != -ENODEV)
3155 return rc;
3156 ret = rc;
3157 }
d4b2bab4 3158 }
1da177e4
LT
3159
3160 /* is all this really necessary? */
3161 ap->ops->dev_select(ap, 0);
3162 if (dev1)
3163 ap->ops->dev_select(ap, 1);
3164 if (dev0)
3165 ap->ops->dev_select(ap, 0);
d4b2bab4 3166
9b89391c 3167 return ret;
1da177e4
LT
3168}
3169
d4b2bab4
TH
3170static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3171 unsigned long deadline)
1da177e4
LT
3172{
3173 struct ata_ioports *ioaddr = &ap->ioaddr;
3174
44877b4e 3175 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3176
3177 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3178 iowrite8(ap->ctl, ioaddr->ctl_addr);
3179 udelay(20); /* FIXME: flush */
3180 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3181 udelay(20); /* FIXME: flush */
3182 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3183
3184 /* spec mandates ">= 2ms" before checking status.
3185 * We wait 150ms, because that was the magic delay used for
3186 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3187 * between when the ATA command register is written, and then
3188 * status is checked. Because waiting for "a while" before
3189 * checking status is fine, post SRST, we perform this magic
3190 * delay here as well.
09c7ad79
AC
3191 *
3192 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3193 */
3194 msleep(150);
3195
2e9edbf8 3196 /* Before we perform post reset processing we want to see if
298a41ca
TH
3197 * the bus shows 0xFF because the odd clown forgets the D7
3198 * pulldown resistor.
3199 */
d1adc1bb 3200 if (ata_check_status(ap) == 0xFF)
9b89391c 3201 return -ENODEV;
09c7ad79 3202
d4b2bab4 3203 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3204}
3205
3206/**
3207 * ata_bus_reset - reset host port and associated ATA channel
3208 * @ap: port to reset
3209 *
3210 * This is typically the first time we actually start issuing
3211 * commands to the ATA channel. We wait for BSY to clear, then
3212 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3213 * result. Determine what devices, if any, are on the channel
3214 * by looking at the device 0/1 error register. Look at the signature
3215 * stored in each device's taskfile registers, to determine if
3216 * the device is ATA or ATAPI.
3217 *
3218 * LOCKING:
0cba632b 3219 * PCI/etc. bus probe sem.
cca3974e 3220 * Obtains host lock.
1da177e4
LT
3221 *
3222 * SIDE EFFECTS:
198e0fed 3223 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3224 */
3225
3226void ata_bus_reset(struct ata_port *ap)
3227{
9af5c9c9 3228 struct ata_device *device = ap->link.device;
1da177e4
LT
3229 struct ata_ioports *ioaddr = &ap->ioaddr;
3230 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3231 u8 err;
aec5c3c1 3232 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3233 int rc;
1da177e4 3234
44877b4e 3235 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3236
3237 /* determine if device 0/1 are present */
3238 if (ap->flags & ATA_FLAG_SATA_RESET)
3239 dev0 = 1;
3240 else {
3241 dev0 = ata_devchk(ap, 0);
3242 if (slave_possible)
3243 dev1 = ata_devchk(ap, 1);
3244 }
3245
3246 if (dev0)
3247 devmask |= (1 << 0);
3248 if (dev1)
3249 devmask |= (1 << 1);
3250
3251 /* select device 0 again */
3252 ap->ops->dev_select(ap, 0);
3253
3254 /* issue bus reset */
9b89391c
TH
3255 if (ap->flags & ATA_FLAG_SRST) {
3256 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3257 if (rc && rc != -ENODEV)
aec5c3c1 3258 goto err_out;
9b89391c 3259 }
1da177e4
LT
3260
3261 /*
3262 * determine by signature whether we have ATA or ATAPI devices
3263 */
3f19859e 3264 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3265 if ((slave_possible) && (err != 0x81))
3f19859e 3266 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3267
1da177e4 3268 /* is double-select really necessary? */
9af5c9c9 3269 if (device[1].class != ATA_DEV_NONE)
1da177e4 3270 ap->ops->dev_select(ap, 1);
9af5c9c9 3271 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3272 ap->ops->dev_select(ap, 0);
3273
3274 /* if no devices were detected, disable this port */
9af5c9c9
TH
3275 if ((device[0].class == ATA_DEV_NONE) &&
3276 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3277 goto err_out;
3278
3279 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3280 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3281 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3282 }
3283
3284 DPRINTK("EXIT\n");
3285 return;
3286
3287err_out:
f15a1daf 3288 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3289 ata_port_disable(ap);
1da177e4
LT
3290
3291 DPRINTK("EXIT\n");
3292}
3293
d7bb4cc7 3294/**
936fd732
TH
3295 * sata_link_debounce - debounce SATA phy status
3296 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3297 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3298 * @deadline: deadline jiffies for the operation
d7bb4cc7 3299 *
936fd732 3300* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3301 * holding the same value where DET is not 1 for @duration polled
3302 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3303 * beginning of the stable state. Because DET gets stuck at 1 on
3304 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3305 * until timeout then returns 0 if DET is stable at 1.
3306 *
d4b2bab4
TH
3307 * @timeout is further limited by @deadline. The sooner of the
3308 * two is used.
3309 *
d7bb4cc7
TH
3310 * LOCKING:
3311 * Kernel thread context (may sleep)
3312 *
3313 * RETURNS:
3314 * 0 on success, -errno on failure.
3315 */
936fd732
TH
3316int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3317 unsigned long deadline)
7a7921e8 3318{
d7bb4cc7 3319 unsigned long interval_msec = params[0];
d4b2bab4
TH
3320 unsigned long duration = msecs_to_jiffies(params[1]);
3321 unsigned long last_jiffies, t;
d7bb4cc7
TH
3322 u32 last, cur;
3323 int rc;
3324
d4b2bab4
TH
3325 t = jiffies + msecs_to_jiffies(params[2]);
3326 if (time_before(t, deadline))
3327 deadline = t;
3328
936fd732 3329 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3330 return rc;
3331 cur &= 0xf;
3332
3333 last = cur;
3334 last_jiffies = jiffies;
3335
3336 while (1) {
3337 msleep(interval_msec);
936fd732 3338 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3339 return rc;
3340 cur &= 0xf;
3341
3342 /* DET stable? */
3343 if (cur == last) {
d4b2bab4 3344 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3345 continue;
3346 if (time_after(jiffies, last_jiffies + duration))
3347 return 0;
3348 continue;
3349 }
3350
3351 /* unstable, start over */
3352 last = cur;
3353 last_jiffies = jiffies;
3354
f1545154
TH
3355 /* Check deadline. If debouncing failed, return
3356 * -EPIPE to tell upper layer to lower link speed.
3357 */
d4b2bab4 3358 if (time_after(jiffies, deadline))
f1545154 3359 return -EPIPE;
d7bb4cc7
TH
3360 }
3361}
3362
3363/**
936fd732
TH
3364 * sata_link_resume - resume SATA link
3365 * @link: ATA link to resume SATA
d7bb4cc7 3366 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3367 * @deadline: deadline jiffies for the operation
d7bb4cc7 3368 *
936fd732 3369 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3370 *
3371 * LOCKING:
3372 * Kernel thread context (may sleep)
3373 *
3374 * RETURNS:
3375 * 0 on success, -errno on failure.
3376 */
936fd732
TH
3377int sata_link_resume(struct ata_link *link, const unsigned long *params,
3378 unsigned long deadline)
d7bb4cc7
TH
3379{
3380 u32 scontrol;
81952c54
TH
3381 int rc;
3382
936fd732 3383 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3384 return rc;
7a7921e8 3385
852ee16a 3386 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3387
936fd732 3388 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3389 return rc;
7a7921e8 3390
d7bb4cc7
TH
3391 /* Some PHYs react badly if SStatus is pounded immediately
3392 * after resuming. Delay 200ms before debouncing.
3393 */
3394 msleep(200);
7a7921e8 3395
936fd732 3396 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3397}
3398
f5914a46
TH
3399/**
3400 * ata_std_prereset - prepare for reset
cc0680a5 3401 * @link: ATA link to be reset
d4b2bab4 3402 * @deadline: deadline jiffies for the operation
f5914a46 3403 *
cc0680a5 3404 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3405 * prereset makes libata abort whole reset sequence and give up
3406 * that port, so prereset should be best-effort. It does its
3407 * best to prepare for reset sequence but if things go wrong, it
3408 * should just whine, not fail.
f5914a46
TH
3409 *
3410 * LOCKING:
3411 * Kernel thread context (may sleep)
3412 *
3413 * RETURNS:
3414 * 0 on success, -errno otherwise.
3415 */
cc0680a5 3416int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3417{
cc0680a5 3418 struct ata_port *ap = link->ap;
936fd732 3419 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3420 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3421 int rc;
3422
31daabda 3423 /* handle link resume */
28324304 3424 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3425 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3426 ehc->i.action |= ATA_EH_HARDRESET;
3427
f5914a46
TH
3428 /* if we're about to do hardreset, nothing more to do */
3429 if (ehc->i.action & ATA_EH_HARDRESET)
3430 return 0;
3431
936fd732 3432 /* if SATA, resume link */
a16abc0b 3433 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3434 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3435 /* whine about phy resume failure but proceed */
3436 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3437 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3438 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3439 }
3440
3441 /* Wait for !BSY if the controller can wait for the first D2H
3442 * Reg FIS and we don't know that no device is attached.
3443 */
0c88758b 3444 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3445 rc = ata_wait_ready(ap, deadline);
6dffaf61 3446 if (rc && rc != -ENODEV) {
cc0680a5 3447 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3448 "(errno=%d), forcing hardreset\n", rc);
3449 ehc->i.action |= ATA_EH_HARDRESET;
3450 }
3451 }
f5914a46
TH
3452
3453 return 0;
3454}
3455
c2bd5804
TH
3456/**
3457 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3458 * @link: ATA link to reset
c2bd5804 3459 * @classes: resulting classes of attached devices
d4b2bab4 3460 * @deadline: deadline jiffies for the operation
c2bd5804 3461 *
52783c5d 3462 * Reset host port using ATA SRST.
c2bd5804
TH
3463 *
3464 * LOCKING:
3465 * Kernel thread context (may sleep)
3466 *
3467 * RETURNS:
3468 * 0 on success, -errno otherwise.
3469 */
cc0680a5 3470int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3471 unsigned long deadline)
c2bd5804 3472{
cc0680a5 3473 struct ata_port *ap = link->ap;
c2bd5804 3474 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3475 unsigned int devmask = 0;
3476 int rc;
c2bd5804
TH
3477 u8 err;
3478
3479 DPRINTK("ENTER\n");
3480
936fd732 3481 if (ata_link_offline(link)) {
3a39746a
TH
3482 classes[0] = ATA_DEV_NONE;
3483 goto out;
3484 }
3485
c2bd5804
TH
3486 /* determine if device 0/1 are present */
3487 if (ata_devchk(ap, 0))
3488 devmask |= (1 << 0);
3489 if (slave_possible && ata_devchk(ap, 1))
3490 devmask |= (1 << 1);
3491
c2bd5804
TH
3492 /* select device 0 again */
3493 ap->ops->dev_select(ap, 0);
3494
3495 /* issue bus reset */
3496 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3497 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3498 /* if link is occupied, -ENODEV too is an error */
936fd732 3499 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3500 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3501 return rc;
c2bd5804
TH
3502 }
3503
3504 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3505 classes[0] = ata_dev_try_classify(&link->device[0],
3506 devmask & (1 << 0), &err);
c2bd5804 3507 if (slave_possible && err != 0x81)
3f19859e
TH
3508 classes[1] = ata_dev_try_classify(&link->device[1],
3509 devmask & (1 << 1), &err);
c2bd5804 3510
3a39746a 3511 out:
c2bd5804
TH
3512 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3513 return 0;
3514}
3515
3516/**
cc0680a5
TH
3517 * sata_link_hardreset - reset link via SATA phy reset
3518 * @link: link to reset
b6103f6d 3519 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3520 * @deadline: deadline jiffies for the operation
c2bd5804 3521 *
cc0680a5 3522 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3523 *
3524 * LOCKING:
3525 * Kernel thread context (may sleep)
3526 *
3527 * RETURNS:
3528 * 0 on success, -errno otherwise.
3529 */
cc0680a5 3530int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3531 unsigned long deadline)
c2bd5804 3532{
852ee16a 3533 u32 scontrol;
81952c54 3534 int rc;
852ee16a 3535
c2bd5804
TH
3536 DPRINTK("ENTER\n");
3537
936fd732 3538 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3539 /* SATA spec says nothing about how to reconfigure
3540 * spd. To be on the safe side, turn off phy during
3541 * reconfiguration. This works for at least ICH7 AHCI
3542 * and Sil3124.
3543 */
936fd732 3544 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3545 goto out;
81952c54 3546
a34b6fc0 3547 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3548
936fd732 3549 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3550 goto out;
1c3fae4d 3551
936fd732 3552 sata_set_spd(link);
1c3fae4d
TH
3553 }
3554
3555 /* issue phy wake/reset */
936fd732 3556 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3557 goto out;
81952c54 3558
852ee16a 3559 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3560
936fd732 3561 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3562 goto out;
c2bd5804 3563
1c3fae4d 3564 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3565 * 10.4.2 says at least 1 ms.
3566 */
3567 msleep(1);
3568
936fd732
TH
3569 /* bring link back */
3570 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3571 out:
3572 DPRINTK("EXIT, rc=%d\n", rc);
3573 return rc;
3574}
3575
3576/**
3577 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3578 * @link: link to reset
b6103f6d 3579 * @class: resulting class of attached device
d4b2bab4 3580 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3581 *
3582 * SATA phy-reset host port using DET bits of SControl register,
3583 * wait for !BSY and classify the attached device.
3584 *
3585 * LOCKING:
3586 * Kernel thread context (may sleep)
3587 *
3588 * RETURNS:
3589 * 0 on success, -errno otherwise.
3590 */
cc0680a5 3591int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3592 unsigned long deadline)
b6103f6d 3593{
cc0680a5 3594 struct ata_port *ap = link->ap;
936fd732 3595 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3596 int rc;
3597
3598 DPRINTK("ENTER\n");
3599
3600 /* do hardreset */
cc0680a5 3601 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3602 if (rc) {
cc0680a5 3603 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3604 "COMRESET failed (errno=%d)\n", rc);
3605 return rc;
3606 }
c2bd5804 3607
c2bd5804 3608 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3609 if (ata_link_offline(link)) {
c2bd5804
TH
3610 *class = ATA_DEV_NONE;
3611 DPRINTK("EXIT, link offline\n");
3612 return 0;
3613 }
3614
34fee227
TH
3615 /* wait a while before checking status, see SRST for more info */
3616 msleep(150);
3617
d4b2bab4 3618 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3619 /* link occupied, -ENODEV too is an error */
3620 if (rc) {
cc0680a5 3621 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3622 "COMRESET failed (errno=%d)\n", rc);
3623 return rc;
c2bd5804
TH
3624 }
3625
3a39746a
TH
3626 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3627
3f19859e 3628 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3629
3630 DPRINTK("EXIT, class=%u\n", *class);
3631 return 0;
3632}
3633
3634/**
3635 * ata_std_postreset - standard postreset callback
cc0680a5 3636 * @link: the target ata_link
c2bd5804
TH
3637 * @classes: classes of attached devices
3638 *
3639 * This function is invoked after a successful reset. Note that
3640 * the device might have been reset more than once using
3641 * different reset methods before postreset is invoked.
c2bd5804 3642 *
c2bd5804
TH
3643 * LOCKING:
3644 * Kernel thread context (may sleep)
3645 */
cc0680a5 3646void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3647{
cc0680a5 3648 struct ata_port *ap = link->ap;
dc2b3515
TH
3649 u32 serror;
3650
c2bd5804
TH
3651 DPRINTK("ENTER\n");
3652
c2bd5804 3653 /* print link status */
936fd732 3654 sata_print_link_status(link);
c2bd5804 3655
dc2b3515 3656 /* clear SError */
936fd732
TH
3657 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3658 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3659
c2bd5804
TH
3660 /* is double-select really necessary? */
3661 if (classes[0] != ATA_DEV_NONE)
3662 ap->ops->dev_select(ap, 1);
3663 if (classes[1] != ATA_DEV_NONE)
3664 ap->ops->dev_select(ap, 0);
3665
3a39746a
TH
3666 /* bail out if no device is present */
3667 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3668 DPRINTK("EXIT, no device\n");
3669 return;
3670 }
3671
3672 /* set up device control */
0d5ff566
TH
3673 if (ap->ioaddr.ctl_addr)
3674 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3675
3676 DPRINTK("EXIT\n");
3677}
3678
623a3128
TH
3679/**
3680 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3681 * @dev: device to compare against
3682 * @new_class: class of the new device
3683 * @new_id: IDENTIFY page of the new device
3684 *
3685 * Compare @new_class and @new_id against @dev and determine
3686 * whether @dev is the device indicated by @new_class and
3687 * @new_id.
3688 *
3689 * LOCKING:
3690 * None.
3691 *
3692 * RETURNS:
3693 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3694 */
3373efd8
TH
3695static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3696 const u16 *new_id)
623a3128
TH
3697{
3698 const u16 *old_id = dev->id;
a0cf733b
TH
3699 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3700 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3701
3702 if (dev->class != new_class) {
f15a1daf
TH
3703 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3704 dev->class, new_class);
623a3128
TH
3705 return 0;
3706 }
3707
a0cf733b
TH
3708 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3709 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3710 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3711 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3712
3713 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3714 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3715 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3716 return 0;
3717 }
3718
3719 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3720 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3721 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3722 return 0;
3723 }
3724
623a3128
TH
3725 return 1;
3726}
3727
3728/**
fe30911b 3729 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3730 * @dev: target ATA device
bff04647 3731 * @readid_flags: read ID flags
623a3128
TH
3732 *
3733 * Re-read IDENTIFY page and make sure @dev is still attached to
3734 * the port.
3735 *
3736 * LOCKING:
3737 * Kernel thread context (may sleep)
3738 *
3739 * RETURNS:
3740 * 0 on success, negative errno otherwise
3741 */
fe30911b 3742int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3743{
5eb45c02 3744 unsigned int class = dev->class;
9af5c9c9 3745 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3746 int rc;
3747
fe635c7e 3748 /* read ID data */
bff04647 3749 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3750 if (rc)
fe30911b 3751 return rc;
623a3128
TH
3752
3753 /* is the device still there? */
fe30911b
TH
3754 if (!ata_dev_same_device(dev, class, id))
3755 return -ENODEV;
623a3128 3756
fe635c7e 3757 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3758 return 0;
3759}
3760
3761/**
3762 * ata_dev_revalidate - Revalidate ATA device
3763 * @dev: device to revalidate
422c9daa 3764 * @new_class: new class code
fe30911b
TH
3765 * @readid_flags: read ID flags
3766 *
3767 * Re-read IDENTIFY page, make sure @dev is still attached to the
3768 * port and reconfigure it according to the new IDENTIFY page.
3769 *
3770 * LOCKING:
3771 * Kernel thread context (may sleep)
3772 *
3773 * RETURNS:
3774 * 0 on success, negative errno otherwise
3775 */
422c9daa
TH
3776int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3777 unsigned int readid_flags)
fe30911b 3778{
6ddcd3b0 3779 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3780 int rc;
3781
3782 if (!ata_dev_enabled(dev))
3783 return -ENODEV;
3784
422c9daa
TH
3785 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3786 if (ata_class_enabled(new_class) &&
3787 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3788 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3789 dev->class, new_class);
3790 rc = -ENODEV;
3791 goto fail;
3792 }
3793
fe30911b
TH
3794 /* re-read ID */
3795 rc = ata_dev_reread_id(dev, readid_flags);
3796 if (rc)
3797 goto fail;
623a3128
TH
3798
3799 /* configure device according to the new ID */
efdaedc4 3800 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3801 if (rc)
3802 goto fail;
3803
3804 /* verify n_sectors hasn't changed */
b54eebd6
TH
3805 if (dev->class == ATA_DEV_ATA && n_sectors &&
3806 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3807 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3808 "%llu != %llu\n",
3809 (unsigned long long)n_sectors,
3810 (unsigned long long)dev->n_sectors);
8270bec4
TH
3811
3812 /* restore original n_sectors */
3813 dev->n_sectors = n_sectors;
3814
6ddcd3b0
TH
3815 rc = -ENODEV;
3816 goto fail;
3817 }
3818
3819 return 0;
623a3128
TH
3820
3821 fail:
f15a1daf 3822 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3823 return rc;
3824}
3825
6919a0a6
AC
3826struct ata_blacklist_entry {
3827 const char *model_num;
3828 const char *model_rev;
3829 unsigned long horkage;
3830};
3831
3832static const struct ata_blacklist_entry ata_device_blacklist [] = {
3833 /* Devices with DMA related problems under Linux */
3834 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3835 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3836 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3837 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3838 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3839 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3840 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3841 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3842 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3843 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3844 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3845 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3846 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3847 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3848 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3849 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3850 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3851 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3852 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3853 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3854 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3855 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3856 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3857 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3858 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3859 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3860 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3861 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3862 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3863 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3864 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3865 { "IOMEGA ZIP 250 ATAPI Floppy",
3866 NULL, ATA_HORKAGE_NODMA },
6919a0a6 3867
18d6e9d5 3868 /* Weird ATAPI devices */
40a1d531 3869 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3870
6919a0a6
AC
3871 /* Devices we expect to fail diagnostics */
3872
3873 /* Devices where NCQ should be avoided */
3874 /* NCQ is slow */
3875 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3876 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3877 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3878 /* NCQ is broken */
539cc7c7 3879 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3880 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
2f8d90ab 3881 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
539cc7c7
JG
3882 ATA_HORKAGE_NONCQ },
3883
36e337d0
RH
3884 /* Blacklist entries taken from Silicon Image 3124/3132
3885 Windows driver .inf file - also several Linux problem reports */
3886 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3887 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3888 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3889 /* Drives which do spurious command completion */
3890 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3891 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
e14cbfa6 3892 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3893 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
a520f261 3894 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3fb6589c 3895 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
0e3dbc01 3896 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
5d6aca8d 3897 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3898
16c55b03
TH
3899 /* devices which puke on READ_NATIVE_MAX */
3900 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3901 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3902 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3903 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6
AC
3904
3905 /* End Marker */
3906 { }
1da177e4 3907};
2e9edbf8 3908
539cc7c7
JG
/*
 * Match @name against pattern @patt.  A pattern ending in @wildchar
 * ("foo*") matches any name beginning with "foo"; otherwise an exact
 * match is required.
 *
 * RETURNS 0 on a match, non-zero otherwise (strcmp-style).
 *
 * Fix: the previous code compared only strlen(name) bytes in the
 * non-wildcard case, so a name that was a strict prefix of the
 * pattern (e.g. name "WDC AC11000" vs pattern "WDC AC11000H") matched
 * incorrectly.  Use a full strcmp instead.
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		return strncmp(patt, name, p - patt);

	/* no trailing wildcard: require an exact match */
	return strcmp(patt, name);
}
3925
75683fe7 3926static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3927{
8bfa79fc
TH
3928 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3929 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3930 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3931
8bfa79fc
TH
3932 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3933 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3934
6919a0a6 3935 while (ad->model_num) {
539cc7c7 3936 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3937 if (ad->model_rev == NULL)
3938 return ad->horkage;
539cc7c7 3939 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 3940 return ad->horkage;
f4b15fef 3941 }
6919a0a6 3942 ad++;
f4b15fef 3943 }
1da177e4
LT
3944 return 0;
3945}
3946
6919a0a6
AC
3947static int ata_dma_blacklisted(const struct ata_device *dev)
3948{
3949 /* We don't support polling DMA.
3950 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3951 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3952 */
9af5c9c9 3953 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3954 (dev->flags & ATA_DFLAG_CDB_INTR))
3955 return 1;
75683fe7 3956 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3957}
3958
a6d5a51c
TH
3959/**
3960 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3961 * @dev: Device to compute xfermask for
3962 *
acf356b1
TH
3963 * Compute supported xfermask of @dev and store it in
3964 * dev->*_mask. This function is responsible for applying all
3965 * known limits including host controller limits, device
3966 * blacklist, etc...
a6d5a51c
TH
3967 *
3968 * LOCKING:
3969 * None.
a6d5a51c 3970 */
3373efd8 3971static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3972{
9af5c9c9
TH
3973 struct ata_link *link = dev->link;
3974 struct ata_port *ap = link->ap;
cca3974e 3975 struct ata_host *host = ap->host;
a6d5a51c 3976 unsigned long xfer_mask;
1da177e4 3977
37deecb5 3978 /* controller modes available */
565083e1
TH
3979 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3980 ap->mwdma_mask, ap->udma_mask);
3981
8343f889 3982 /* drive modes available */
37deecb5
TH
3983 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3984 dev->mwdma_mask, dev->udma_mask);
3985 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3986
b352e57d
AC
3987 /*
3988 * CFA Advanced TrueIDE timings are not allowed on a shared
3989 * cable
3990 */
3991 if (ata_dev_pair(dev)) {
3992 /* No PIO5 or PIO6 */
3993 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3994 /* No MWDMA3 or MWDMA 4 */
3995 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3996 }
3997
37deecb5
TH
3998 if (ata_dma_blacklisted(dev)) {
3999 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4000 ata_dev_printk(dev, KERN_WARNING,
4001 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4002 }
a6d5a51c 4003
14d66ab7
PV
4004 if ((host->flags & ATA_HOST_SIMPLEX) &&
4005 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4006 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4007 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4008 "other device, disabling DMA\n");
5444a6f4 4009 }
565083e1 4010
e424675f
JG
4011 if (ap->flags & ATA_FLAG_NO_IORDY)
4012 xfer_mask &= ata_pio_mask_no_iordy(dev);
4013
5444a6f4 4014 if (ap->ops->mode_filter)
a76b62ca 4015 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4016
8343f889
RH
4017 /* Apply cable rule here. Don't apply it early because when
4018 * we handle hot plug the cable type can itself change.
4019 * Check this last so that we know if the transfer rate was
4020 * solely limited by the cable.
4021 * Unknown or 80 wire cables reported host side are checked
4022 * drive side as well. Cases where we know a 40wire cable
4023 * is used safely for 80 are not checked here.
4024 */
4025 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4026 /* UDMA/44 or higher would be available */
4027 if((ap->cbl == ATA_CBL_PATA40) ||
4028 (ata_drive_40wire(dev->id) &&
4029 (ap->cbl == ATA_CBL_PATA_UNK ||
4030 ap->cbl == ATA_CBL_PATA80))) {
4031 ata_dev_printk(dev, KERN_WARNING,
4032 "limited to UDMA/33 due to 40-wire cable\n");
4033 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4034 }
4035
565083e1
TH
4036 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4037 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4038}
4039
1da177e4
LT
4040/**
4041 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4042 * @dev: Device to which command will be sent
4043 *
780a87f7
JG
4044 * Issue SET FEATURES - XFER MODE command to device @dev
4045 * on port @ap.
4046 *
1da177e4 4047 * LOCKING:
0cba632b 4048 * PCI/etc. bus probe sem.
83206a29
TH
4049 *
4050 * RETURNS:
4051 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4052 */
4053
3373efd8 4054static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4055{
a0123703 4056 struct ata_taskfile tf;
83206a29 4057 unsigned int err_mask;
1da177e4
LT
4058
4059 /* set up set-features taskfile */
4060 DPRINTK("set features - xfer mode\n");
4061
464cf177
TH
4062 /* Some controllers and ATAPI devices show flaky interrupt
4063 * behavior after setting xfer mode. Use polling instead.
4064 */
3373efd8 4065 ata_tf_init(dev, &tf);
a0123703
TH
4066 tf.command = ATA_CMD_SET_FEATURES;
4067 tf.feature = SETFEATURES_XFER;
464cf177 4068 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4069 tf.protocol = ATA_PROT_NODATA;
4070 tf.nsect = dev->xfer_mode;
1da177e4 4071
3373efd8 4072 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4073
4074 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4075 return err_mask;
4076}
4077
4078/**
4079 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4080 * @dev: Device to which command will be sent
4081 * @enable: Whether to enable or disable the feature
4082 *
4083 * Issue SET FEATURES - SATA FEATURES command to device @dev
4084 * on port @ap with sector count set to indicate Asynchronous
4085 * Notification feature
4086 *
4087 * LOCKING:
4088 * PCI/etc. bus probe sem.
4089 *
4090 * RETURNS:
4091 * 0 on success, AC_ERR_* mask otherwise.
4092 */
4093static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4094{
4095 struct ata_taskfile tf;
4096 unsigned int err_mask;
4097
4098 /* set up set-features taskfile */
4099 DPRINTK("set features - SATA features\n");
4100
4101 ata_tf_init(dev, &tf);
4102 tf.command = ATA_CMD_SET_FEATURES;
4103 tf.feature = enable;
4104 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4105 tf.protocol = ATA_PROT_NODATA;
4106 tf.nsect = SATA_AN;
4107
4108 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4109
83206a29
TH
4110 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4111 return err_mask;
1da177e4
LT
4112}
4113
8bf62ece
AL
4114/**
4115 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4116 * @dev: Device to which command will be sent
e2a7f77a
RD
4117 * @heads: Number of heads (taskfile parameter)
4118 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4119 *
4120 * LOCKING:
6aff8f1f
TH
4121 * Kernel thread context (may sleep)
4122 *
4123 * RETURNS:
4124 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4125 */
3373efd8
TH
4126static unsigned int ata_dev_init_params(struct ata_device *dev,
4127 u16 heads, u16 sectors)
8bf62ece 4128{
a0123703 4129 struct ata_taskfile tf;
6aff8f1f 4130 unsigned int err_mask;
8bf62ece
AL
4131
4132 /* Number of sectors per track 1-255. Number of heads 1-16 */
4133 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4134 return AC_ERR_INVALID;
8bf62ece
AL
4135
4136 /* set up init dev params taskfile */
4137 DPRINTK("init dev params \n");
4138
3373efd8 4139 ata_tf_init(dev, &tf);
a0123703
TH
4140 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4141 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4142 tf.protocol = ATA_PROT_NODATA;
4143 tf.nsect = sectors;
4144 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4145
3373efd8 4146 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4147 /* A clean abort indicates an original or just out of spec drive
4148 and we should continue as we issue the setup based on the
4149 drive reported working geometry */
4150 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4151 err_mask = 0;
8bf62ece 4152
6aff8f1f
TH
4153 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4154 return err_mask;
8bf62ece
AL
4155}
4156
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	undoing ata_sg_setup()/ata_sg_setup_one(): the sg (or single
 *	buffer) is unmapped, any trimmed last-sg length is restored,
 *	and for from-device transfers the contents of the 32-bit
 *	alignment pad buffer are copied back into the caller's buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg = qc->__sg;
        int dir = qc->dma_dir;
        void *pad_buf = NULL;

        WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
        WARN_ON(sg == NULL);

        if (qc->flags & ATA_QCFLAG_SINGLE)
                WARN_ON(qc->n_elem > 1);

        VPRINTK("unmapping %u sg elements\n", qc->n_elem);

        /* if we padded the buffer out to 32-bit bound, and data
         * xfer direction is from-device, we must copy from the
         * pad buffer back into the supplied buffer
         */
        if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
                pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

        if (qc->flags & ATA_QCFLAG_SG) {
                /* scatter-gather case: n_elem may be 0 if the whole
                 * last sg was consumed by the pad (see ata_sg_setup) */
                if (qc->n_elem)
                        dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
                /* restore last sg - undo the trim done at setup time */
                sg[qc->orig_n_elem - 1].length += qc->pad_len;
                if (pad_buf) {
                        struct scatterlist *psg = &qc->pad_sgent;
                        /* pad sg may point into highmem; KM_IRQ0 since we
                         * hold the host lock with IRQs disabled */
                        void *addr = kmap_atomic(psg->page, KM_IRQ0);
                        memcpy(addr + psg->offset, pad_buf, qc->pad_len);
                        kunmap_atomic(addr, KM_IRQ0);
                }
        } else {
                /* single-buffer case */
                if (qc->n_elem)
                        dma_unmap_single(ap->dev,
                                sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
                                dir);
                /* restore sg */
                sg->length += qc->pad_len;
                if (pad_buf)
                        /* copy pad bytes back to the tail of the buffer */
                        memcpy(qc->buf_virt + sg->length - qc->pad_len,
                               pad_buf, qc->pad_len);
        }

        qc->flags &= ~ATA_QCFLAG_DMAMAP;
        qc->__sg = NULL;
}
4214
4215/**
4216 * ata_fill_sg - Fill PCI IDE PRD table
4217 * @qc: Metadata associated with taskfile to be transferred
4218 *
780a87f7
JG
4219 * Fill PCI IDE PRD (scatter-gather) table with segments
4220 * associated with the current disk command.
4221 *
1da177e4 4222 * LOCKING:
cca3974e 4223 * spin_lock_irqsave(host lock)
1da177e4
LT
4224 *
4225 */
4226static void ata_fill_sg(struct ata_queued_cmd *qc)
4227{
1da177e4 4228 struct ata_port *ap = qc->ap;
cedc9a47
JG
4229 struct scatterlist *sg;
4230 unsigned int idx;
1da177e4 4231
a4631474 4232 WARN_ON(qc->__sg == NULL);
f131883e 4233 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4234
4235 idx = 0;
cedc9a47 4236 ata_for_each_sg(sg, qc) {
1da177e4
LT
4237 u32 addr, offset;
4238 u32 sg_len, len;
4239
4240 /* determine if physical DMA addr spans 64K boundary.
4241 * Note h/w doesn't support 64-bit, so we unconditionally
4242 * truncate dma_addr_t to u32.
4243 */
4244 addr = (u32) sg_dma_address(sg);
4245 sg_len = sg_dma_len(sg);
4246
4247 while (sg_len) {
4248 offset = addr & 0xffff;
4249 len = sg_len;
4250 if ((offset + sg_len) > 0x10000)
4251 len = 0x10000 - offset;
4252
4253 ap->prd[idx].addr = cpu_to_le32(addr);
4254 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4255 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4256
4257 idx++;
4258 sg_len -= len;
4259 addr += len;
4260 }
4261 }
4262
4263 if (idx)
4264 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4265}
b9a4197e 4266
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg;
        unsigned int idx;

        WARN_ON(qc->__sg == NULL);
        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

        idx = 0;
        ata_for_each_sg(sg, qc) {
                u32 addr, offset;
                u32 sg_len, len, blen;

                /* determine if physical DMA addr spans 64K boundary.
                 * Note h/w doesn't support 64-bit, so we unconditionally
                 * truncate dma_addr_t to u32.
                 */
                addr = (u32) sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                while (sg_len) {
                        offset = addr & 0xffff;
                        len = sg_len;
                        if ((offset + sg_len) > 0x10000)
                                len = 0x10000 - offset;

                        /* a PRD length field of 0x0000 means 64K per spec */
                        blen = len & 0xffff;
                        ap->prd[idx].addr = cpu_to_le32(addr);
                        if (blen == 0) {
                                /* Some PATA chipsets like the CS5530 can't
                                   cope with 0x0000 meaning 64K as the spec says.
                                   Emit two 32K PRD entries instead: the current
                                   one is rewritten to 32K and a second entry
                                   covering the upper half is appended. */
                                ap->prd[idx].flags_len = cpu_to_le32(0x8000);
                                blen = 0x8000;
                                ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
                        }
                        /* note: after the split above, idx points at the
                         * second (upper-half) entry, which gets blen=0x8000 */
                        ap->prd[idx].flags_len = cpu_to_le32(blen);
                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

                        idx++;
                        sg_len -= len;
                        addr += len;
                }
        }

        if (idx)
                ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4328
1da177e4
LT
4329/**
4330 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4331 * @qc: Metadata associated with taskfile to check
4332 *
780a87f7
JG
4333 * Allow low-level driver to filter ATA PACKET commands, returning
4334 * a status indicating whether or not it is OK to use DMA for the
4335 * supplied PACKET command.
4336 *
1da177e4 4337 * LOCKING:
cca3974e 4338 * spin_lock_irqsave(host lock)
0cba632b 4339 *
1da177e4
LT
4340 * RETURNS: 0 when ATAPI DMA can be used
4341 * nonzero otherwise
4342 */
4343int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4344{
4345 struct ata_port *ap = qc->ap;
b9a4197e
TH
4346
4347 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4348 * few ATAPI devices choke on such DMA requests.
4349 */
4350 if (unlikely(qc->nbytes & 15))
4351 return 1;
6f23a31d 4352
1da177e4 4353 if (ap->ops->check_atapi_dma)
b9a4197e 4354 return ap->ops->check_atapi_dma(qc);
1da177e4 4355
b9a4197e 4356 return 0;
1da177e4 4357}
b9a4197e 4358
31cc23b3
TH
4359/**
4360 * ata_std_qc_defer - Check whether a qc needs to be deferred
4361 * @qc: ATA command in question
4362 *
4363 * Non-NCQ commands cannot run with any other command, NCQ or
4364 * not. As upper layer only knows the queue depth, we are
4365 * responsible for maintaining exclusion. This function checks
4366 * whether a new command @qc can be issued.
4367 *
4368 * LOCKING:
4369 * spin_lock_irqsave(host lock)
4370 *
4371 * RETURNS:
4372 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4373 */
4374int ata_std_qc_defer(struct ata_queued_cmd *qc)
4375{
4376 struct ata_link *link = qc->dev->link;
4377
4378 if (qc->tf.protocol == ATA_PROT_NCQ) {
4379 if (!ata_tag_valid(link->active_tag))
4380 return 0;
4381 } else {
4382 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4383 return 0;
4384 }
4385
4386 return ATA_DEFER_LINK;
4387}
4388
1da177e4
LT
4389/**
4390 * ata_qc_prep - Prepare taskfile for submission
4391 * @qc: Metadata associated with taskfile to be prepared
4392 *
780a87f7
JG
4393 * Prepare ATA taskfile for submission.
4394 *
1da177e4 4395 * LOCKING:
cca3974e 4396 * spin_lock_irqsave(host lock)
1da177e4
LT
4397 */
4398void ata_qc_prep(struct ata_queued_cmd *qc)
4399{
4400 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4401 return;
4402
4403 ata_fill_sg(qc);
4404}
4405
d26fc955
AC
4406/**
4407 * ata_dumb_qc_prep - Prepare taskfile for submission
4408 * @qc: Metadata associated with taskfile to be prepared
4409 *
4410 * Prepare ATA taskfile for submission.
4411 *
4412 * LOCKING:
4413 * spin_lock_irqsave(host lock)
4414 */
4415void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4416{
4417 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4418 return;
4419
4420 ata_fill_sg_dumb(qc);
4421}
4422
/* qc_prep hook for controllers that need no PRD/DMA table setup at all */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4424
0cba632b
JG
4425/**
4426 * ata_sg_init_one - Associate command with memory buffer
4427 * @qc: Command to be associated
4428 * @buf: Memory buffer
4429 * @buflen: Length of memory buffer, in bytes.
4430 *
4431 * Initialize the data-related elements of queued_cmd @qc
4432 * to point to a single memory buffer, @buf of byte length @buflen.
4433 *
4434 * LOCKING:
cca3974e 4435 * spin_lock_irqsave(host lock)
0cba632b
JG
4436 */
4437
1da177e4
LT
4438void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4439{
1da177e4
LT
4440 qc->flags |= ATA_QCFLAG_SINGLE;
4441
cedc9a47 4442 qc->__sg = &qc->sgent;
1da177e4 4443 qc->n_elem = 1;
cedc9a47 4444 qc->orig_n_elem = 1;
1da177e4 4445 qc->buf_virt = buf;
233277ca 4446 qc->nbytes = buflen;
1da177e4 4447
61c0596c 4448 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4449}
4450
0cba632b
JG
4451/**
4452 * ata_sg_init - Associate command with scatter-gather table.
4453 * @qc: Command to be associated
4454 * @sg: Scatter-gather table.
4455 * @n_elem: Number of elements in s/g table.
4456 *
4457 * Initialize the data-related elements of queued_cmd @qc
4458 * to point to a scatter-gather table @sg, containing @n_elem
4459 * elements.
4460 *
4461 * LOCKING:
cca3974e 4462 * spin_lock_irqsave(host lock)
0cba632b
JG
4463 */
4464
1da177e4
LT
4465void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4466 unsigned int n_elem)
4467{
4468 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4469 qc->__sg = sg;
1da177e4 4470 qc->n_elem = n_elem;
cedc9a47 4471 qc->orig_n_elem = n_elem;
1da177e4
LT
4472}
4473
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	If the buffer length is not a multiple of 4, the trailing
 *	1-3 bytes are redirected through the port's per-tag pad
 *	buffer so the transfer ends on a 32-bit boundary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        int dir = qc->dma_dir;
        struct scatterlist *sg = qc->__sg;
        dma_addr_t dma_address;
        int trim_sg = 0;

        /* we must lengthen transfers to end on a 32-bit boundary */
        qc->pad_len = sg->length & 3;
        if (qc->pad_len) {
                /* each qc tag owns an ATA_DMA_PAD_SZ slice of ap->pad */
                void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
                struct scatterlist *psg = &qc->pad_sgent;

                /* only ATAPI commands should ever need padding */
                WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

                memset(pad_buf, 0, ATA_DMA_PAD_SZ);

                /* for writes, pre-fill the pad buffer with the tail bytes;
                 * for reads ata_sg_clean() copies them back afterwards */
                if (qc->tf.flags & ATA_TFLAG_WRITE)
                        memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
                               qc->pad_len);

                sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
                sg_dma_len(psg) = ATA_DMA_PAD_SZ;
                /* trim sg */
                sg->length -= qc->pad_len;
                /* buffer shorter than 4 bytes: pad sg carries everything */
                if (sg->length == 0)
                        trim_sg = 1;

                DPRINTK("padding done, sg->length=%u pad_len=%u\n",
                        sg->length, qc->pad_len);
        }

        if (trim_sg) {
                /* nothing left to map; only the (pre-mapped) pad remains */
                qc->n_elem--;
                goto skip_map;
        }

        dma_address = dma_map_single(ap->dev, qc->buf_virt,
                                     sg->length, dir);
        if (dma_mapping_error(dma_address)) {
                /* restore sg so the caller sees the original length */
                sg->length += qc->pad_len;
                return -1;
        }

        sg_dma_address(sg) = dma_address;
        sg_dma_len(sg) = sg->length;

skip_map:
        DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
                qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

        return 0;
}
4542
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	If the total length is not a multiple of 4, the trailing 1-3
 *	bytes of the last sg entry are redirected through the port's
 *	per-tag pad buffer so the transfer ends on a 32-bit boundary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg = qc->__sg;
        struct scatterlist *lsg = &sg[qc->n_elem - 1];
        int n_elem, pre_n_elem, dir, trim_sg = 0;

        VPRINTK("ENTER, ata%u\n", ap->print_id);
        WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

        /* we must lengthen transfers to end on a 32-bit boundary */
        qc->pad_len = lsg->length & 3;
        if (qc->pad_len) {
                /* each qc tag owns an ATA_DMA_PAD_SZ slice of ap->pad */
                void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
                struct scatterlist *psg = &qc->pad_sgent;
                unsigned int offset;

                /* only ATAPI commands should ever need padding */
                WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

                memset(pad_buf, 0, ATA_DMA_PAD_SZ);

                /*
                 * psg->page/offset are used to copy to-be-written
                 * data in this function or read data in ata_sg_clean.
                 */
                offset = lsg->offset + lsg->length - qc->pad_len;
                psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
                psg->offset = offset_in_page(offset);

                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        /* writes: copy the tail bytes into the pad buffer
                         * now; KM_IRQ0 since IRQs are off under host lock */
                        void *addr = kmap_atomic(psg->page, KM_IRQ0);
                        memcpy(pad_buf, addr + psg->offset, qc->pad_len);
                        kunmap_atomic(addr, KM_IRQ0);
                }

                sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
                sg_dma_len(psg) = ATA_DMA_PAD_SZ;
                /* trim last sg */
                lsg->length -= qc->pad_len;
                /* last entry fully consumed by the pad? drop it below */
                if (lsg->length == 0)
                        trim_sg = 1;

                DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
                        qc->n_elem - 1, lsg->length, qc->pad_len);
        }

        pre_n_elem = qc->n_elem;
        if (trim_sg && pre_n_elem)
                pre_n_elem--;

        if (!pre_n_elem) {
                /* nothing left to map; only the (pre-mapped) pad remains */
                n_elem = 0;
                goto skip_map;
        }

        dir = qc->dma_dir;
        n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
        if (n_elem < 1) {
                /* restore last sg so the caller sees the original length */
                lsg->length += qc->pad_len;
                return -1;
        }

        DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
        qc->n_elem = n_elem;

        return 0;
}
4627
0baab86b 4628/**
c893a3ae 4629 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4630 * @buf: Buffer to swap
4631 * @buf_words: Number of 16-bit words in buffer.
4632 *
4633 * Swap halves of 16-bit words if needed to convert from
4634 * little-endian byte order to native cpu byte order, or
4635 * vice-versa.
4636 *
4637 * LOCKING:
6f0ef4fa 4638 * Inherited from caller.
0baab86b 4639 */
1da177e4
LT
4640void swap_buf_le16(u16 *buf, unsigned int buf_words)
4641{
4642#ifdef __BIG_ENDIAN
4643 unsigned int i;
4644
4645 for (i = 0; i < buf_words; i++)
4646 buf[i] = le16_to_cpu(buf[i]);
4647#endif /* __BIG_ENDIAN */
4648}
4649
6ae4cfb5 4650/**
0d5ff566 4651 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4652 * @adev: device to target
6ae4cfb5
AL
4653 * @buf: data buffer
4654 * @buflen: buffer length
344babaa 4655 * @write_data: read/write
6ae4cfb5
AL
4656 *
4657 * Transfer data from/to the device data register by PIO.
4658 *
4659 * LOCKING:
4660 * Inherited from caller.
6ae4cfb5 4661 */
0d5ff566
TH
4662void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4663 unsigned int buflen, int write_data)
1da177e4 4664{
9af5c9c9 4665 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4666 unsigned int words = buflen >> 1;
1da177e4 4667
6ae4cfb5 4668 /* Transfer multiple of 2 bytes */
1da177e4 4669 if (write_data)
0d5ff566 4670 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4671 else
0d5ff566 4672 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4673
4674 /* Transfer trailing 1 byte, if any. */
4675 if (unlikely(buflen & 0x01)) {
4676 u16 align_buf[1] = { 0 };
4677 unsigned char *trailing_buf = buf + buflen - 1;
4678
4679 if (write_data) {
4680 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4681 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4682 } else {
0d5ff566 4683 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4684 memcpy(trailing_buf, align_buf, 1);
4685 }
4686 }
1da177e4
LT
4687}
4688
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
                         unsigned int buflen, int write_data)
{
        unsigned long irq_flags;

        /* keep local interrupts off for the whole PIO burst */
        local_irq_save(irq_flags);
        ata_data_xfer(adev, buf, buflen, write_data);
        local_irq_restore(irq_flags);
}

4710
4711
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing qc's cursor state (curbytes/cursg/cursg_ofs) and
 *	moving the HSM to HSM_ST_LAST when this is the final sector.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct scatterlist *sg = qc->__sg;
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned int offset;
        unsigned char *buf;

        /* last sector of the command? signal HSM before transferring */
        if (qc->curbytes == qc->nbytes - qc->sect_size)
                ap->hsm_task_state = HSM_ST_LAST;

        page = sg[qc->cursg].page;
        offset = sg[qc->cursg].offset + qc->cursg_ofs;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

        if (PageHighMem(page)) {
                unsigned long flags;

                /* FIXME: use a bounce buffer */
                /* kmap_atomic requires IRQs off; KM_IRQ0 slot used */
                local_irq_save(flags);
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
                ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);
        } else {
                /* lowmem page is permanently mapped; no kmap needed */
                buf = page_address(page);
                ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
        }

        /* advance the transfer cursor */
        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;

        /* current sg entry exhausted? step to the next one */
        if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
                qc->cursg++;
                qc->cursg_ofs = 0;
        }
}
1da177e4 4768
07f6f7d0 4769/**
5a5dbd18 4770 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4771 * @qc: Command on going
4772 *
5a5dbd18 4773 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4774 * ATA device for the DRQ request.
4775 *
4776 * LOCKING:
4777 * Inherited from caller.
4778 */
1da177e4 4779
07f6f7d0
AL
4780static void ata_pio_sectors(struct ata_queued_cmd *qc)
4781{
4782 if (is_multi_taskfile(&qc->tf)) {
4783 /* READ/WRITE MULTIPLE */
4784 unsigned int nsect;
4785
587005de 4786 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4787
5a5dbd18 4788 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4789 qc->dev->multi_count);
07f6f7d0
AL
4790 while (nsect--)
4791 ata_pio_sector(qc);
4792 } else
4793 ata_pio_sector(qc);
4cc980b3
AL
4794
4795 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4796}
4797
c71c1857
AL
4798/**
4799 * atapi_send_cdb - Write CDB bytes to hardware
4800 * @ap: Port to which ATAPI device is attached.
4801 * @qc: Taskfile currently active
4802 *
4803 * When device has indicated its readiness to accept
4804 * a CDB, this function is called. Send the CDB.
4805 *
4806 * LOCKING:
4807 * caller.
4808 */
4809
4810static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4811{
4812 /* send SCSI cdb */
4813 DPRINTK("send cdb\n");
db024d53 4814 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4815
a6b2c5d4 4816 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4817 ata_altstatus(ap); /* flush */
4818
4819 switch (qc->tf.protocol) {
4820 case ATA_PROT_ATAPI:
4821 ap->hsm_task_state = HSM_ST;
4822 break;
4823 case ATA_PROT_ATAPI_NODATA:
4824 ap->hsm_task_state = HSM_ST_LAST;
4825 break;
4826 case ATA_PROT_ATAPI_DMA:
4827 ap->hsm_task_state = HSM_ST_LAST;
4828 /* initiate bmdma */
4829 ap->ops->bmdma_start(qc);
4830 break;
4831 }
1da177e4
LT
4832}
4833
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes requested by the device's byte count
 *	        register for this DRQ
 *
 *	Transfer data from/to the ATAPI device, walking qc->__sg and
 *	advancing qc's cursor state.  If the device asks for more data
 *	than the sg list holds, the excess is drained (read) or
 *	zero-padded (write) so the byte count is still honoured.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct scatterlist *sg = qc->__sg;
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned char *buf;
        unsigned int offset, count;

        /* will this DRQ complete the command? */
        if (qc->curbytes + bytes >= qc->nbytes)
                ap->hsm_task_state = HSM_ST_LAST;

next_sg:
        if (unlikely(qc->cursg >= qc->n_elem)) {
                /*
                 * The end of qc->sg is reached and the device expects
                 * more data to transfer. In order not to overrun qc->sg
                 * and fulfill length specified in the byte count register,
                 * - for read case, discard trailing data from the device
                 * - for write case, padding zero data to the device
                 */
                u16 pad_buf[1] = { 0 };
                unsigned int words = bytes >> 1;
                unsigned int i;

                if (words) /* warning if bytes > 1 */
                        ata_dev_printk(qc->dev, KERN_WARNING,
                                       "%u bytes trailing data\n", bytes);

                /* move the excess one word at a time through pad_buf */
                for (i = 0; i < words; i++)
                        ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

                ap->hsm_task_state = HSM_ST_LAST;
                return;
        }

        sg = &qc->__sg[qc->cursg];

        page = sg->page;
        offset = sg->offset + qc->cursg_ofs;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

        /* don't overrun current sg */
        count = min(sg->length - qc->cursg_ofs, bytes);

        /* don't cross page boundaries */
        count = min(count, (unsigned int)PAGE_SIZE - offset);

        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

        if (PageHighMem(page)) {
                unsigned long flags;

                /* FIXME: use bounce buffer */
                /* kmap_atomic requires IRQs off; KM_IRQ0 slot used */
                local_irq_save(flags);
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
                ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);
        } else {
                /* lowmem page is permanently mapped; no kmap needed */
                buf = page_address(page);
                ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
        }

        /* advance the transfer cursor */
        bytes -= count;
        qc->curbytes += count;
        qc->cursg_ofs += count;

        /* current sg entry exhausted? step to the next one */
        if (qc->cursg_ofs == sg->length) {
                qc->cursg++;
                qc->cursg_ofs = 0;
        }

        /* more bytes demanded by the device for this DRQ? keep going */
        if (bytes)
                goto next_sg;
}
4928
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the interrupt reason and byte count registers from the
 *	device, validate that the device's view of the transfer
 *	direction matches ours, then transfer the requested number of
 *	bytes via __atapi_pio_bytes().  On a protocol violation the
 *	HSM is moved to HSM_ST_ERR with AC_ERR_HSM set.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *dev = qc->dev;
        unsigned int ireason, bc_lo, bc_hi, bytes;
        int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

        /* Abuse qc->result_tf for temp storage of intermediate TF
         * here to save some kernel stack usage.
         * For normal completion, qc->result_tf is not relevant. For
         * error, qc->result_tf is later overwritten by ata_qc_complete().
         * So, the correctness of qc->result_tf is not affected.
         */
        ap->ops->tf_read(ap, &qc->result_tf);
        ireason = qc->result_tf.nsect;
        bc_lo = qc->result_tf.lbam;
        bc_hi = qc->result_tf.lbah;
        /* byte count is split across the two LBA-mid/high registers */
        bytes = (bc_hi << 8) | bc_lo;

        /* shall be cleared to zero, indicating xfer of data */
        if (ireason & (1 << 0))
                goto err_out;

        /* make sure transfer direction matches expected */
        i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
        if (do_write != i_write)
                goto err_out;

        VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

        __atapi_pio_bytes(qc, bytes);
        ata_altstatus(ap); /* flush */

        return;

err_out:
        ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
        qc->err_mask |= AC_ERR_HSM;
        ap->hsm_task_state = HSM_ST_ERR;
}
4979
4980/**
c234fb00
AL
4981 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4982 * @ap: the target ata_port
4983 * @qc: qc on going
1da177e4 4984 *
c234fb00
AL
4985 * RETURNS:
4986 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4987 */
c234fb00
AL
4988
4989static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4990{
c234fb00
AL
4991 if (qc->tf.flags & ATA_TFLAG_POLLING)
4992 return 1;
1da177e4 4993
c234fb00
AL
4994 if (ap->hsm_task_state == HSM_ST_FIRST) {
4995 if (qc->tf.protocol == ATA_PROT_PIO &&
4996 (qc->tf.flags & ATA_TFLAG_WRITE))
4997 return 1;
1da177e4 4998
c234fb00
AL
4999 if (is_atapi_taskfile(&qc->tf) &&
5000 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5001 return 1;
fe79e683
AL
5002 }
5003
c234fb00
AL
5004 return 0;
5005}
1da177e4 5006
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH: an HSM violation freezes the port instead
		 * of completing the qc normally.
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* caller already holds the host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: just complete; re-enable the IRQ first
		 * when called from the polling workqueue.
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5056
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one or more steps,
 *	based on the device status last read by the caller.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5290
/* Workqueue handler that drives a polled PIO/ATAPI command through the
 * HSM.  Re-queues itself (possibly delayed) while the device stays BSY
 * or while ata_hsm_move() asks for another poll.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: back off and retry later */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5328
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	Allocate a free command tag on @ap and return the matching
 *	qc, or NULL if the port is frozen or no tag is free.
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
5359
5360/**
5361 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5362 * @dev: Device from whom we request an available command structure
5363 *
5364 * LOCKING:
0cba632b 5365 * None.
1da177e4
LT
5366 */
5367
3373efd8 5368struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5369{
9af5c9c9 5370 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5371 struct ata_queued_cmd *qc;
5372
5373 qc = ata_qc_new(ap);
5374 if (qc) {
1da177e4
LT
5375 qc->scsicmd = NULL;
5376 qc->ap = ap;
5377 qc->dev = dev;
1da177e4 5378
2c13b7ce 5379 ata_qc_reinit(qc);
1da177e4
LT
5380 }
5381
5382 return qc;
5383}
5384
1da177e4
LT
5385/**
5386 * ata_qc_free - free unused ata_queued_cmd
5387 * @qc: Command to complete
5388 *
5389 * Designed to free unused ata_queued_cmd object
5390 * in case something prevents using it.
5391 *
5392 * LOCKING:
cca3974e 5393 * spin_lock_irqsave(host lock)
1da177e4
LT
5394 */
5395void ata_qc_free(struct ata_queued_cmd *qc)
5396{
4ba946e9
TH
5397 struct ata_port *ap = qc->ap;
5398 unsigned int tag;
5399
a4631474 5400 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5401
4ba946e9
TH
5402 qc->flags = 0;
5403 tag = qc->tag;
5404 if (likely(ata_tag_valid(tag))) {
4ba946e9 5405 qc->tag = ATA_TAG_POISON;
6cec4a39 5406 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5407 }
1da177e4
LT
5408}
5409
76014427 5410void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5411{
dedaf2b0 5412 struct ata_port *ap = qc->ap;
9af5c9c9 5413 struct ata_link *link = qc->dev->link;
dedaf2b0 5414
a4631474
TH
5415 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5416 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5417
5418 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5419 ata_sg_clean(qc);
5420
7401abf2 5421 /* command should be marked inactive atomically with qc completion */
da917d69 5422 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5423 link->sactive &= ~(1 << qc->tag);
da917d69
TH
5424 if (!link->sactive)
5425 ap->nr_active_links--;
5426 } else {
9af5c9c9 5427 link->active_tag = ATA_TAG_POISON;
da917d69
TH
5428 ap->nr_active_links--;
5429 }
5430
5431 /* clear exclusive status */
5432 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5433 ap->excl_link == link))
5434 ap->excl_link = NULL;
7401abf2 5435
3f3791d3
AL
5436 /* atapi: mark qc as inactive to prevent the interrupt handler
5437 * from completing the command twice later, before the error handler
5438 * is called. (when rc != 0 and atapi request sense is needed)
5439 */
5440 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5441 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5442
1da177e4 5443 /* call completion callback */
77853bf2 5444 qc->complete_fn(qc);
1da177e4
LT
5445}
5446
39599a53
TH
5447static void fill_result_tf(struct ata_queued_cmd *qc)
5448{
5449 struct ata_port *ap = qc->ap;
5450
39599a53 5451 qc->result_tf.flags = qc->tf.flags;
4742d54f 5452 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5453}
5454
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5514
dedaf2b0
TH
5515/**
5516 * ata_qc_complete_multiple - Complete multiple qcs successfully
5517 * @ap: port in question
5518 * @qc_active: new qc_active mask
5519 * @finish_qc: LLDD callback invoked before completing a qc
5520 *
5521 * Complete in-flight commands. This functions is meant to be
5522 * called from low-level driver's interrupt routine to complete
5523 * requests normally. ap->qc_active and @qc_active is compared
5524 * and commands are completed accordingly.
5525 *
5526 * LOCKING:
cca3974e 5527 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5528 *
5529 * RETURNS:
5530 * Number of completed commands on success, -errno otherwise.
5531 */
5532int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5533 void (*finish_qc)(struct ata_queued_cmd *))
5534{
5535 int nr_done = 0;
5536 u32 done_mask;
5537 int i;
5538
5539 done_mask = ap->qc_active ^ qc_active;
5540
5541 if (unlikely(done_mask & qc_active)) {
5542 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5543 "(%08x->%08x)\n", ap->qc_active, qc_active);
5544 return -EINVAL;
5545 }
5546
5547 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5548 struct ata_queued_cmd *qc;
5549
5550 if (!(done_mask & (1 << i)))
5551 continue;
5552
5553 if ((qc = ata_qc_from_tag(ap, i))) {
5554 if (finish_qc)
5555 finish_qc(qc);
5556 ata_qc_complete(qc);
5557 nr_done++;
5558 }
5559 }
5560
5561 return nr_done;
5562}
5563
1da177e4
LT
5564static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5565{
5566 struct ata_port *ap = qc->ap;
5567
5568 switch (qc->tf.protocol) {
3dc1d881 5569 case ATA_PROT_NCQ:
1da177e4
LT
5570 case ATA_PROT_DMA:
5571 case ATA_PROT_ATAPI_DMA:
5572 return 1;
5573
5574 case ATA_PROT_ATAPI:
5575 case ATA_PROT_PIO:
1da177e4
LT
5576 if (ap->flags & ATA_FLAG_PIO_DMA)
5577 return 1;
5578
5579 /* fall through */
5580
5581 default:
5582 return 0;
5583 }
5584
5585 /* never reached */
5586}
5587
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* mark the tag active: sactive bitmap for NCQ, active_tag
	 * otherwise; keep nr_active_links in sync either way
	 */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed: report as a system error and complete the
	 * qc so the bookkeeping above is undone
	 */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5652
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; each protocol sets the HSM state it
	 * expects next and, when polling, queues ata_pio_task()
	 */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5784
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA transfer failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5886
5887/**
5888 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5889 * @irq: irq line (unused)
cca3974e 5890 * @dev_instance: pointer to our ata_host information structure
1da177e4 5891 *
0cba632b
JG
5892 * Default interrupt handler for PCI IDE devices. Calls
5893 * ata_host_intr() for each port that is not disabled.
5894 *
1da177e4 5895 * LOCKING:
cca3974e 5896 * Obtains host lock during operation.
1da177e4
LT
5897 *
5898 * RETURNS:
0cba632b 5899 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5900 */
5901
7d12e780 5902irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5903{
cca3974e 5904 struct ata_host *host = dev_instance;
1da177e4
LT
5905 unsigned int i;
5906 unsigned int handled = 0;
5907 unsigned long flags;
5908
5909 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5910 spin_lock_irqsave(&host->lock, flags);
1da177e4 5911
cca3974e 5912 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5913 struct ata_port *ap;
5914
cca3974e 5915 ap = host->ports[i];
c1389503 5916 if (ap &&
029f5468 5917 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5918 struct ata_queued_cmd *qc;
5919
9af5c9c9 5920 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5921 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5922 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5923 handled |= ata_host_intr(ap, qc);
5924 }
5925 }
5926
cca3974e 5927 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5928
5929 return IRQ_RETVAL(handled);
5930}
5931
34bf2170
TH
5932/**
5933 * sata_scr_valid - test whether SCRs are accessible
936fd732 5934 * @link: ATA link to test SCR accessibility for
34bf2170 5935 *
936fd732 5936 * Test whether SCRs are accessible for @link.
34bf2170
TH
5937 *
5938 * LOCKING:
5939 * None.
5940 *
5941 * RETURNS:
5942 * 1 if SCRs are accessible, 0 otherwise.
5943 */
936fd732 5944int sata_scr_valid(struct ata_link *link)
34bf2170 5945{
936fd732
TH
5946 struct ata_port *ap = link->ap;
5947
a16abc0b 5948 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5949}
5950
5951/**
5952 * sata_scr_read - read SCR register of the specified port
936fd732 5953 * @link: ATA link to read SCR for
34bf2170
TH
5954 * @reg: SCR to read
5955 * @val: Place to store read value
5956 *
936fd732 5957 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5958 * guaranteed to succeed if the cable type of the port is SATA
5959 * and the port implements ->scr_read.
5960 *
5961 * LOCKING:
5962 * None.
5963 *
5964 * RETURNS:
5965 * 0 on success, negative errno on failure.
5966 */
936fd732 5967int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5968{
936fd732
TH
5969 struct ata_port *ap = link->ap;
5970
5971 if (sata_scr_valid(link))
da3dbb17 5972 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5973 return -EOPNOTSUPP;
5974}
5975
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;

	if (sata_scr_valid(link))
		return ap->ops->scr_write(ap, reg, val);
	return -EOPNOTSUPP;
}
6000
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;
	int rc;

	if (sata_scr_valid(link)) {
		rc = ap->ops->scr_write(ap, reg, val);
		/* flush by reading the register back; the read-back value
		 * is discarded (written into the local @val).
		 */
		if (rc == 0)
			rc = ap->ops->scr_read(ap, reg, &val);
		return rc;
	}
	return -EOPNOTSUPP;
}
6029
6030/**
936fd732
TH
6031 * ata_link_online - test whether the given link is online
6032 * @link: ATA link to test
34bf2170 6033 *
936fd732
TH
6034 * Test whether @link is online. Note that this function returns
6035 * 0 if online status of @link cannot be obtained, so
6036 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6037 *
6038 * LOCKING:
6039 * None.
6040 *
6041 * RETURNS:
6042 * 1 if the port online status is available and online.
6043 */
936fd732 6044int ata_link_online(struct ata_link *link)
34bf2170
TH
6045{
6046 u32 sstatus;
6047
936fd732
TH
6048 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6049 (sstatus & 0xf) == 0x3)
34bf2170
TH
6050 return 1;
6051 return 0;
6052}
6053
6054/**
936fd732
TH
6055 * ata_link_offline - test whether the given link is offline
6056 * @link: ATA link to test
34bf2170 6057 *
936fd732
TH
6058 * Test whether @link is offline. Note that this function
6059 * returns 0 if offline status of @link cannot be obtained, so
6060 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6061 *
6062 * LOCKING:
6063 * None.
6064 *
6065 * RETURNS:
6066 * 1 if the port offline status is available and offline.
6067 */
936fd732 6068int ata_link_offline(struct ata_link *link)
34bf2170
TH
6069{
6070 u32 sstatus;
6071
936fd732
TH
6072 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6073 (sstatus & 0xf) != 0x3)
34bf2170
TH
6074 return 1;
6075 return 0;
6076}
/**
 *	ata_flush_cache - issue FLUSH CACHE to an ATA device
 *	@dev: target device
 *
 *	Flush @dev's write cache using FLUSH CACHE EXT when the device
 *	advertises it (ATA_DFLAG_FLUSH_EXT), FLUSH CACHE otherwise.
 *	Devices for which ata_try_flush_cache() says flushing is
 *	unnecessary are skipped.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -EIO if the flush command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6103
#ifdef CONFIG_PM
/*
 * ata_host_request_pm - request EH to perform a PM operation on all ports
 * @host: target host
 * @mesg: PM message to hand to EH
 * @action: EH actions to schedule on every link
 * @ehi_flags: EH info flags to set on every link
 * @wait: if non-zero, wait for each port's EH to finish and check result
 *
 * Returns 0 on success, first non-zero per-port result otherwise
 * (only meaningful when @wait is set).
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports its result through ap->pm_result,
			 * which points at our stack-local rc.
			 */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6154
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/* wait == 1: suspend must be complete before we return */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: resumes run asynchronously and in parallel */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif
500530f6 6198
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM or ata_pad_alloc()'s error code on failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed allocation -- freed automatically on detach */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6229
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past ATA_DEVICE_CLEAR_OFFSET; the fields before
	 * it (link, devno, the flags handled above) must survive re-init.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* transfer-mode masks start wide open; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6264
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6297
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SPD field (bits 7:4) limits the interface speed; 0 = no limit */
	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6328
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* INITIALIZING is cleared once the port is registered/probed */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;		/* ports share the host lock */
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;		/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable: fastdrain can wait for the next natural wakeup */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host port itself is link/PMP number 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6388
/*
 * ata_host_release - devres release callback for an ATA host
 * @gendev: generic device the host is attached to
 * @res: devres payload (unused; host is fetched from drvdata)
 *
 * Stops all started ports and the host, then frees every port's
 * SCSI host reference and the port itself.  Stopping and freeing are
 * done in two separate passes so that ->host_stop() still sees all
 * ports intact.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* pass 1: stop all ports, then the host */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		/* only stop what was actually started */
		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* pass 2: release per-port memory */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6422
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 pointer leaves a NULL terminator after the last port
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	/* releases everything added to the group, including the ports */
	devres_release_group(dev, NULL);
	return NULL;
}
6487
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out (NULL entry), keep reusing the last pi */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops double as host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6537
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: second and later calls are no-ops */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		/* freeze until EH performs the initial probe */
		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports started so far */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6591
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6612
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		/* NOTE: this inner rc shadows the outer one; the probe
		 * result is deliberately not propagated (see FIXME below).
		 */
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
6740
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* devres-managed IRQ; released automatically with the host */
	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6785
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* without EH, removal is just unhooking the SCSI host */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* hotplug work may requeue itself; cancel it for good */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6840
0529c159
TH
6841/**
6842 * ata_host_detach - Detach all ports of an ATA host
6843 * @host: Host to detach
6844 *
6845 * Detach all ports of @host.
6846 *
6847 * LOCKING:
6848 * Kernel thread context (may sleep).
6849 */
6850void ata_host_detach(struct ata_host *host)
6851{
6852 int i;
6853
6854 for (i = 0; i < host->n_ports; i++)
6855 ata_port_detach(host->ports[i]);
6856}
6857
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	/* all taskfile registers live at fixed offsets from cmd_addr */
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
6883
0baab86b 6884
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
6905
/* move to PCI subsystem */
/*
 * pci_test_config_bits - test bits in a PCI config-space register
 * @pdev: PCI device to read from
 * @bits: descriptor: register offset, access width (1/2/4 bytes),
 *        mask to apply, and expected value
 *
 * Returns 1 if (reg & mask) == val, 0 if not, -EINVAL on an
 * unsupported access width.
 */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	/* read with the width the caller asked for */
	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
9b847548 6939
6ffa01d8 6940#ifdef CONFIG_PM
3c5100c1 6941void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6942{
6943 pci_save_state(pdev);
4c90d971 6944 pci_disable_device(pdev);
500530f6 6945
4c90d971 6946 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6947 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6948}
6949
/*
 * ata_pci_device_do_resume - PCI-level resume for an ATA controller
 * @pdev: PCI device to resume
 *
 * Restores power state and config space, re-enables the device
 * (devres-managed) and restores bus mastering.
 *
 * Returns 0 on success, -errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6967
/*
 * ata_pci_device_suspend - suspend ATA host, then its PCI device
 * @pdev: PCI device whose ATA host to suspend
 * @mesg: PM message
 *
 * Returns 0 on success, -errno if the host-level suspend failed
 * (in which case the PCI device is left untouched).
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	/* quiesce the ATA side first; bail before touching PCI state */
	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
6981
/*
 * ata_pci_device_resume - resume PCI device, then its ATA host
 * @pdev: PCI device whose ATA host to resume
 *
 * Returns 0 on success, -errno if the PCI-level resume failed
 * (the ATA host is only resumed after the device is back up).
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */
6993
1da177e4
LT
6994#endif /* CONFIG_PCI */
6995
6996
/*
 * ata_init - libata module init
 *
 * Converts the probe timeout from seconds to jiffies and creates the
 * two libata workqueues: "ata" (general port work) and "ata_aux"
 * (single-threaded auxiliary work, e.g. hotplug).
 *
 * Returns 0 on success, -ENOMEM if a workqueue could not be created.
 */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		/* undo the first allocation before failing */
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
7013
/* ata_exit - libata module exit; tear down both workqueues */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

/* subsys_initcall: libata must initialize before the LLDs that use it */
subsys_initcall(ata_init);
module_exit(ata_exit);
7022
/* Earliest time (in jiffies) at which the next ata_ratelimit() call
 * may succeed again.
 */
static unsigned long ratelimit_time;
/* Protects ratelimit_time; taken with local IRQs disabled. */
static DEFINE_SPINLOCK(ata_ratelimit_lock);
7025
7026int ata_ratelimit(void)
7027{
7028 int rc;
7029 unsigned long flags;
7030
7031 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7032
7033 if (time_after(jiffies, ratelimit_time)) {
7034 rc = 1;
7035 ratelimit_time = jiffies + (HZ/5);
7036 } else
7037 rc = 0;
7038
7039 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7040
7041 return rc;
7042}
7043
c22daff4
TH
7044/**
7045 * ata_wait_register - wait until register value changes
7046 * @reg: IO-mapped register
7047 * @mask: Mask to apply to read register value
7048 * @val: Wait condition
7049 * @interval_msec: polling interval in milliseconds
7050 * @timeout_msec: timeout in milliseconds
7051 *
7052 * Waiting for some bits of register to change is a common
7053 * operation for ATA controllers. This function reads 32bit LE
7054 * IO-mapped register @reg and tests for the following condition.
7055 *
7056 * (*@reg & mask) != val
7057 *
7058 * If the condition is met, it returns; otherwise, the process is
7059 * repeated after @interval_msec until timeout.
7060 *
7061 * LOCKING:
7062 * Kernel thread context (may sleep)
7063 *
7064 * RETURNS:
7065 * The final register value.
7066 */
7067u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7068 unsigned long interval_msec,
7069 unsigned long timeout_msec)
7070{
7071 unsigned long timeout;
7072 u32 tmp;
7073
7074 tmp = ioread32(reg);
7075
7076 /* Calculate timeout _after_ the first read to make sure
7077 * preceding writes reach the controller before starting to
7078 * eat away the timeout.
7079 */
7080 timeout = jiffies + (timeout_msec * HZ) / 1000;
7081
7082 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7083 msleep(interval_msec);
7084 tmp = ioread32(reg);
7085 }
7086
7087 return tmp;
7088}
7089
dd5b06c4
TH
/*
 * Dummy port_ops helpers: stand-ins for ports with no usable hardware
 * behind them.  Each one does the minimum its slot requires.
 */
/* No-op for hooks taking a port and returning nothing. */
static void ata_dummy_noret(struct ata_port *ap)	{ }
/* Always-successful no-op for hooks returning an int (e.g. port_start). */
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
/* No-op for hooks taking a queued command and returning nothing. */
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* Status reads always report "device ready" so nothing ever waits. */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* Every command issued to a dummy port fails with a system error. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7106
/*
 * port_ops for dummy ports: status reads report ATA_DRDY, every issued
 * command fails with AC_ERR_SYSTEM, and all EH/freeze/thaw/IRQ hooks
 * are no-ops.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
7121
21b0ad4f
TH
/* Minimal port_info wired to ata_dummy_port_ops for dummy ports. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
7125
1da177e4
LT
7126/*
7127 * libata is essentially a library of internal helper functions for
7128 * low-level ATA host controller drivers. As such, the API/ABI is
7129 * likely to change as new drivers are added and updated.
7130 * Do not depend on ABI/API stability.
7131 */
7132
e9c83914
TH
7133EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7134EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7135EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7136EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7137EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7138EXPORT_SYMBOL_GPL(ata_std_bios_param);
7139EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7140EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7141EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7142EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7143EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7144EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7145EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7146EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7147EXPORT_SYMBOL_GPL(ata_sg_init);
7148EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7149EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7150EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7151EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7152EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7153EXPORT_SYMBOL_GPL(ata_tf_load);
7154EXPORT_SYMBOL_GPL(ata_tf_read);
7155EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7156EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7157EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7158EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7159EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7160EXPORT_SYMBOL_GPL(ata_check_status);
7161EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7162EXPORT_SYMBOL_GPL(ata_exec_command);
7163EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7164EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7165EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7166EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7167EXPORT_SYMBOL_GPL(ata_data_xfer);
7168EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7169EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7170EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7171EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7172EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7173EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7174EXPORT_SYMBOL_GPL(ata_bmdma_start);
7175EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7176EXPORT_SYMBOL_GPL(ata_bmdma_status);
7177EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7178EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7179EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7180EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7181EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7182EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7183EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7184EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7185EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7186EXPORT_SYMBOL_GPL(sata_link_debounce);
7187EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4
LT
7188EXPORT_SYMBOL_GPL(sata_phy_reset);
7189EXPORT_SYMBOL_GPL(__sata_phy_reset);
7190EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7191EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7192EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7193EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7194EXPORT_SYMBOL_GPL(sata_std_hardreset);
7195EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7196EXPORT_SYMBOL_GPL(ata_dev_classify);
7197EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7198EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7199EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7200EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7201EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 7202EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7203EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7204EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7205EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7206EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7207EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7208EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7209EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7210EXPORT_SYMBOL_GPL(sata_scr_valid);
7211EXPORT_SYMBOL_GPL(sata_scr_read);
7212EXPORT_SYMBOL_GPL(sata_scr_write);
7213EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7214EXPORT_SYMBOL_GPL(ata_link_online);
7215EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7216#ifdef CONFIG_PM
cca3974e
JG
7217EXPORT_SYMBOL_GPL(ata_host_suspend);
7218EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7219#endif /* CONFIG_PM */
6a62a04d
TH
7220EXPORT_SYMBOL_GPL(ata_id_string);
7221EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 7222EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
1da177e4
LT
7223EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7224
1bc4ccff 7225EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
7226EXPORT_SYMBOL_GPL(ata_timing_compute);
7227EXPORT_SYMBOL_GPL(ata_timing_merge);
7228
1da177e4
LT
7229#ifdef CONFIG_PCI
7230EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7231EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7232EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7233EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7234EXPORT_SYMBOL_GPL(ata_pci_init_one);
7235EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7236#ifdef CONFIG_PM
500530f6
TH
7237EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7238EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7239EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7240EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7241#endif /* CONFIG_PM */
67951ade
AC
7242EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7243EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7244#endif /* CONFIG_PCI */
9b847548 7245
b64bbc39
TH
7246EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7247EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7248EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7249EXPORT_SYMBOL_GPL(ata_port_desc);
7250#ifdef CONFIG_PCI
7251EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7252#endif /* CONFIG_PCI */
ece1d636 7253EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 7254EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7255EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7256EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7257EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7258EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7259EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7260EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7261EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7262EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7263EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7264EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7265EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7266
7267EXPORT_SYMBOL_GPL(ata_cable_40wire);
7268EXPORT_SYMBOL_GPL(ata_cable_80wire);
7269EXPORT_SYMBOL_GPL(ata_cable_unknown);
7270EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 0.927197 seconds and 5 git commands to generate.