libata-pmp-prep: implement qc_defer helpers
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
a8601e5f
AM
100static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101module_param(ata_probe_timeout, int, 0444);
102MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
103
d7d0dad6
JG
104int libata_noacpi = 1;
105module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
106MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
107
1da177e4
LT
108MODULE_AUTHOR("Jeff Garzik");
109MODULE_DESCRIPTION("Library module for ATA devices");
110MODULE_LICENSE("GPL");
111MODULE_VERSION(DRV_VERSION);
112
0baab86b 113
1da177e4
LT
114/**
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
1da177e4 117 * @pmp: Port multiplier port
9977126c
TH
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will output
1da177e4
LT
120 *
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
123 *
124 * LOCKING:
125 * Inherited from caller.
126 */
9977126c 127void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 128{
9977126c
TH
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number*/
131 if (is_cmd)
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
133
1da177e4
LT
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
136
137 fis[4] = tf->lbal;
138 fis[5] = tf->lbam;
139 fis[6] = tf->lbah;
140 fis[7] = tf->device;
141
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
146
147 fis[12] = tf->nsect;
148 fis[13] = tf->hob_nsect;
149 fis[14] = 0;
150 fis[15] = tf->ctl;
151
152 fis[16] = 0;
153 fis[17] = 0;
154 fis[18] = 0;
155 fis[19] = 0;
156}
157
158/**
159 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
160 * @fis: Buffer from which data will be input
161 * @tf: Taskfile to output
162 *
e12a1be6 163 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
164 *
165 * LOCKING:
166 * Inherited from caller.
167 */
168
057ace5e 169void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
170{
171 tf->command = fis[2]; /* status */
172 tf->feature = fis[3]; /* error */
173
174 tf->lbal = fis[4];
175 tf->lbam = fis[5];
176 tf->lbah = fis[6];
177 tf->device = fis[7];
178
179 tf->hob_lbal = fis[8];
180 tf->hob_lbam = fis[9];
181 tf->hob_lbah = fis[10];
182
183 tf->nsect = fis[12];
184 tf->hob_nsect = fis[13];
185}
186
8cbd6df1
AL
187static const u8 ata_rw_cmds[] = {
188 /* pio multi */
189 ATA_CMD_READ_MULTI,
190 ATA_CMD_WRITE_MULTI,
191 ATA_CMD_READ_MULTI_EXT,
192 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
193 0,
194 0,
195 0,
196 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
197 /* pio */
198 ATA_CMD_PIO_READ,
199 ATA_CMD_PIO_WRITE,
200 ATA_CMD_PIO_READ_EXT,
201 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
202 0,
203 0,
204 0,
205 0,
8cbd6df1
AL
206 /* dma */
207 ATA_CMD_READ,
208 ATA_CMD_WRITE,
209 ATA_CMD_READ_EXT,
9a3dccc4
TH
210 ATA_CMD_WRITE_EXT,
211 0,
212 0,
213 0,
214 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 215};
1da177e4
LT
216
217/**
8cbd6df1 218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
1da177e4 221 *
2e9edbf8 222 * Examine the device configuration and tf->flags to calculate
8cbd6df1 223 * the proper read/write commands and protocol to use.
1da177e4
LT
224 *
225 * LOCKING:
226 * caller.
227 */
bd056d7e 228static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 229{
9a3dccc4 230 u8 cmd;
1da177e4 231
9a3dccc4 232 int index, fua, lba48, write;
2e9edbf8 233
9a3dccc4 234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 237
8cbd6df1
AL
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
9a3dccc4 240 index = dev->multi_count ? 0 : 8;
9af5c9c9 241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
0565c26d 244 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
245 } else {
246 tf->protocol = ATA_PROT_DMA;
9a3dccc4 247 index = 16;
8cbd6df1 248 }
1da177e4 249
9a3dccc4
TH
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
251 if (cmd) {
252 tf->command = cmd;
253 return 0;
254 }
255 return -1;
1da177e4
LT
256}
257
35b649fe
TH
258/**
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
262 *
263 * LOCKING:
264 * None.
265 *
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
269 *
270 * RETURNS:
271 * Block address read from @tf.
272 */
273u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274{
275 u64 block = 0;
276
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
282 } else
283 block |= (tf->device & 0xf) << 24;
284
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
287 block |= tf->lbal;
288 } else {
289 u32 cyl, head, sect;
290
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
293 sect = tf->lbal;
294
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
296 }
297
298 return block;
299}
300
bd056d7e
TH
301/**
302 * ata_build_rw_tf - Build ATA taskfile for given read/write request
303 * @tf: Target ATA taskfile
304 * @dev: ATA device @tf belongs to
305 * @block: Block address
306 * @n_block: Number of blocks
307 * @tf_flags: RW/FUA etc...
308 * @tag: tag
309 *
310 * LOCKING:
311 * None.
312 *
313 * Build ATA taskfile @tf for read/write request described by
314 * @block, @n_block, @tf_flags and @tag on @dev.
315 *
316 * RETURNS:
317 *
318 * 0 on success, -ERANGE if the request is too large for @dev,
319 * -EINVAL if the request is invalid.
320 */
321int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
322 u64 block, u32 n_block, unsigned int tf_flags,
323 unsigned int tag)
324{
325 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
326 tf->flags |= tf_flags;
327
6d1245bf 328 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
329 /* yay, NCQ */
330 if (!lba_48_ok(block, n_block))
331 return -ERANGE;
332
333 tf->protocol = ATA_PROT_NCQ;
334 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
335
336 if (tf->flags & ATA_TFLAG_WRITE)
337 tf->command = ATA_CMD_FPDMA_WRITE;
338 else
339 tf->command = ATA_CMD_FPDMA_READ;
340
341 tf->nsect = tag << 3;
342 tf->hob_feature = (n_block >> 8) & 0xff;
343 tf->feature = n_block & 0xff;
344
345 tf->hob_lbah = (block >> 40) & 0xff;
346 tf->hob_lbam = (block >> 32) & 0xff;
347 tf->hob_lbal = (block >> 24) & 0xff;
348 tf->lbah = (block >> 16) & 0xff;
349 tf->lbam = (block >> 8) & 0xff;
350 tf->lbal = block & 0xff;
351
352 tf->device = 1 << 6;
353 if (tf->flags & ATA_TFLAG_FUA)
354 tf->device |= 1 << 7;
355 } else if (dev->flags & ATA_DFLAG_LBA) {
356 tf->flags |= ATA_TFLAG_LBA;
357
358 if (lba_28_ok(block, n_block)) {
359 /* use LBA28 */
360 tf->device |= (block >> 24) & 0xf;
361 } else if (lba_48_ok(block, n_block)) {
362 if (!(dev->flags & ATA_DFLAG_LBA48))
363 return -ERANGE;
364
365 /* use LBA48 */
366 tf->flags |= ATA_TFLAG_LBA48;
367
368 tf->hob_nsect = (n_block >> 8) & 0xff;
369
370 tf->hob_lbah = (block >> 40) & 0xff;
371 tf->hob_lbam = (block >> 32) & 0xff;
372 tf->hob_lbal = (block >> 24) & 0xff;
373 } else
374 /* request too large even for LBA48 */
375 return -ERANGE;
376
377 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
378 return -EINVAL;
379
380 tf->nsect = n_block & 0xff;
381
382 tf->lbah = (block >> 16) & 0xff;
383 tf->lbam = (block >> 8) & 0xff;
384 tf->lbal = block & 0xff;
385
386 tf->device |= ATA_LBA;
387 } else {
388 /* CHS */
389 u32 sect, head, cyl, track;
390
391 /* The request -may- be too large for CHS addressing. */
392 if (!lba_28_ok(block, n_block))
393 return -ERANGE;
394
395 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
396 return -EINVAL;
397
398 /* Convert LBA to CHS */
399 track = (u32)block / dev->sectors;
400 cyl = track / dev->heads;
401 head = track % dev->heads;
402 sect = (u32)block % dev->sectors + 1;
403
404 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
405 (u32)block, track, cyl, head, sect);
406
407 /* Check whether the converted CHS can fit.
408 Cylinder: 0-65535
409 Head: 0-15
410 Sector: 1-255*/
411 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
412 return -ERANGE;
413
414 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
415 tf->lbal = sect;
416 tf->lbam = cyl;
417 tf->lbah = cyl >> 8;
418 tf->device |= head;
419 }
420
421 return 0;
422}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
cb95d562 471static const struct ata_xfer_ent {
be9a50c8 472 int shift, bits;
cb95d562
TH
473 u8 base;
474} ata_xfer_tbl[] = {
475 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
476 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
477 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
478 { -1, },
479};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position in xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
594
4c360c81
TH
/* map a SATA link speed number (1-based) to a human-readable string */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
606
3373efd8 607void ata_dev_disable(struct ata_device *dev)
0b8efb0a 608{
09d7f9b0 609 if (ata_dev_enabled(dev)) {
9af5c9c9 610 if (ata_msg_drv(dev->link->ap))
09d7f9b0 611 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
612 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
613 ATA_DNXFER_QUIET);
0b8efb0a
TH
614 dev->class++;
615 }
616}
617
1da177e4 618/**
0d5ff566 619 * ata_devchk - PATA device presence detection
1da177e4
LT
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
622 *
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
626 *
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
631 *
632 * LOCKING:
633 * caller.
634 */
635
0d5ff566 636static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
637{
638 struct ata_ioports *ioaddr = &ap->ioaddr;
639 u8 nsect, lbal;
640
641 ap->ops->dev_select(ap, device);
642
0d5ff566
TH
643 iowrite8(0x55, ioaddr->nsect_addr);
644 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 645
0d5ff566
TH
646 iowrite8(0xaa, ioaddr->nsect_addr);
647 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 648
0d5ff566
TH
649 iowrite8(0x55, ioaddr->nsect_addr);
650 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 651
0d5ff566
TH
652 nsect = ioread8(ioaddr->nsect_addr);
653 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
654
655 if ((nsect == 0x55) && (lbal == 0xaa))
656 return 1; /* we found a device */
657
658 return 0; /* nothing found */
659}
660
1da177e4
LT
661/**
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
664 *
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
668 *
669 * LOCKING:
670 * None.
671 *
672 * RETURNS:
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
674 * the event of failure.
675 */
676
057ace5e 677unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
678{
679 /* Apple's open source Darwin code hints that some devices only
680 * put a proper signature into the LBA mid/high registers,
681 * So, we only check those. It's sufficient for uniqueness.
682 */
683
684 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
685 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
686 DPRINTK("found ATA device by sig\n");
687 return ATA_DEV_ATA;
688 }
689
690 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
691 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
692 DPRINTK("found ATAPI device by sig\n");
693 return ATA_DEV_ATAPI;
694 }
695
696 DPRINTK("unknown device\n");
697 return ATA_DEV_UNKNOWN;
698}
699
700/**
701 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
702 * @dev: ATA device to classify (starting at zero)
703 * @present: device seems present
b4dc7623 704 * @r_err: Value of error register on completion
1da177e4
LT
705 *
706 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
707 * an ATA/ATAPI-defined set of values is placed in the ATA
708 * shadow registers, indicating the results of device detection
709 * and diagnostics.
710 *
711 * Select the ATA device, and read the values from the ATA shadow
712 * registers. Then parse according to the Error register value,
713 * and the spec-defined values examined by ata_dev_classify().
714 *
715 * LOCKING:
716 * caller.
b4dc7623
TH
717 *
718 * RETURNS:
719 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 720 */
3f19859e
TH
721unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
722 u8 *r_err)
1da177e4 723{
3f19859e 724 struct ata_port *ap = dev->link->ap;
1da177e4
LT
725 struct ata_taskfile tf;
726 unsigned int class;
727 u8 err;
728
3f19859e 729 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
730
731 memset(&tf, 0, sizeof(tf));
732
1da177e4 733 ap->ops->tf_read(ap, &tf);
0169e284 734 err = tf.feature;
b4dc7623
TH
735 if (r_err)
736 *r_err = err;
1da177e4 737
93590859 738 /* see if device passed diags: if master then continue and warn later */
3f19859e 739 if (err == 0 && dev->devno == 0)
93590859 740 /* diagnostic fail : do nothing _YET_ */
3f19859e 741 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 742 else if (err == 1)
1da177e4 743 /* do nothing */ ;
3f19859e 744 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
745 /* do nothing */ ;
746 else
b4dc7623 747 return ATA_DEV_NONE;
1da177e4 748
b4dc7623 749 /* determine if device is ATA or ATAPI */
1da177e4 750 class = ata_dev_classify(&tf);
b4dc7623 751
d7fbee05
TH
752 if (class == ATA_DEV_UNKNOWN) {
753 /* If the device failed diagnostic, it's likely to
754 * have reported incorrect device signature too.
755 * Assume ATA device if the device seems present but
756 * device signature is invalid with diagnostic
757 * failure.
758 */
759 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
760 class = ATA_DEV_ATA;
761 else
762 class = ATA_DEV_NONE;
763 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
764 class = ATA_DEV_NONE;
765
b4dc7623 766 return class;
1da177e4
LT
767}
768
769/**
6a62a04d 770 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
771 * @id: IDENTIFY DEVICE results we will examine
772 * @s: string into which data is output
773 * @ofs: offset into identify device page
774 * @len: length of string to return. must be an even number.
775 *
776 * The strings in the IDENTIFY DEVICE page are broken up into
777 * 16-bit chunks. Run through the string, and output each
778 * 8-bit chunk linearly, regardless of platform.
779 *
780 * LOCKING:
781 * caller.
782 */
783
6a62a04d
TH
784void ata_id_string(const u16 *id, unsigned char *s,
785 unsigned int ofs, unsigned int len)
1da177e4
LT
786{
787 unsigned int c;
788
789 while (len > 0) {
790 c = id[ofs] >> 8;
791 *s = c;
792 s++;
793
794 c = id[ofs] & 0xff;
795 *s = c;
796 s++;
797
798 ofs++;
799 len -= 2;
800 }
801}
802
0e949ff3 803/**
6a62a04d 804 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
805 * @id: IDENTIFY DEVICE results we will examine
806 * @s: string into which data is output
807 * @ofs: offset into identify device page
808 * @len: length of string to return. must be an odd number.
809 *
6a62a04d 810 * This function is identical to ata_id_string except that it
0e949ff3
TH
811 * trims trailing spaces and terminates the resulting string with
812 * null. @len must be actual maximum length (even number) + 1.
813 *
814 * LOCKING:
815 * caller.
816 */
6a62a04d
TH
817void ata_id_c_string(const u16 *id, unsigned char *s,
818 unsigned int ofs, unsigned int len)
0e949ff3
TH
819{
820 unsigned char *p;
821
822 WARN_ON(!(len & 1));
823
6a62a04d 824 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
825
826 p = s + strnlen(s, len - 1);
827 while (p > s && p[-1] == ' ')
828 p--;
829 *p = '\0';
830}
0baab86b 831
db6f8759
TH
832static u64 ata_id_n_sectors(const u16 *id)
833{
834 if (ata_id_has_lba(id)) {
835 if (ata_id_has_lba48(id))
836 return ata_id_u64(id, 100);
837 else
838 return ata_id_u32(id, 60);
839 } else {
840 if (ata_id_current_chs_valid(id))
841 return ata_id_u32(id, 57);
842 else
843 return id[1] * id[3] * id[6];
844 }
845}
846
1e999736
AC
847static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
848{
849 u64 sectors = 0;
850
851 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
852 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
853 sectors |= (tf->hob_lbal & 0xff) << 24;
854 sectors |= (tf->lbah & 0xff) << 16;
855 sectors |= (tf->lbam & 0xff) << 8;
856 sectors |= (tf->lbal & 0xff);
857
858 return ++sectors;
859}
860
861static u64 ata_tf_to_lba(struct ata_taskfile *tf)
862{
863 u64 sectors = 0;
864
865 sectors |= (tf->device & 0x0f) << 24;
866 sectors |= (tf->lbah & 0xff) << 16;
867 sectors |= (tf->lbam & 0xff) << 8;
868 sectors |= (tf->lbal & 0xff);
869
870 return ++sectors;
871}
872
873/**
c728a914
TH
874 * ata_read_native_max_address - Read native max address
875 * @dev: target device
876 * @max_sectors: out parameter for the result native max address
1e999736 877 *
c728a914
TH
878 * Perform an LBA48 or LBA28 native size query upon the device in
879 * question.
1e999736 880 *
c728a914
TH
881 * RETURNS:
882 * 0 on success, -EACCES if command is aborted by the drive.
883 * -EIO on other errors.
1e999736 884 */
c728a914 885static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 886{
c728a914 887 unsigned int err_mask;
1e999736 888 struct ata_taskfile tf;
c728a914 889 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
890
891 ata_tf_init(dev, &tf);
892
c728a914 893 /* always clear all address registers */
1e999736 894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 895
c728a914
TH
896 if (lba48) {
897 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
898 tf.flags |= ATA_TFLAG_LBA48;
899 } else
900 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 901
1e999736 902 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
903 tf.device |= ATA_LBA;
904
905 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
906 if (err_mask) {
907 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
908 "max address (err_mask=0x%x)\n", err_mask);
909 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
910 return -EACCES;
911 return -EIO;
912 }
1e999736 913
c728a914
TH
914 if (lba48)
915 *max_sectors = ata_tf_to_lba48(&tf);
916 else
917 *max_sectors = ata_tf_to_lba(&tf);
1e999736 918
c728a914 919 return 0;
1e999736
AC
920}
921
922/**
c728a914
TH
923 * ata_set_max_sectors - Set max sectors
924 * @dev: target device
6b38d1d1 925 * @new_sectors: new max sectors value to set for the device
1e999736 926 *
c728a914
TH
927 * Set max sectors of @dev to @new_sectors.
928 *
929 * RETURNS:
930 * 0 on success, -EACCES if command is aborted or denied (due to
931 * previous non-volatile SET_MAX) by the drive. -EIO on other
932 * errors.
1e999736 933 */
05027adc 934static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 935{
c728a914 936 unsigned int err_mask;
1e999736 937 struct ata_taskfile tf;
c728a914 938 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
939
940 new_sectors--;
941
942 ata_tf_init(dev, &tf);
943
1e999736 944 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
945
946 if (lba48) {
947 tf.command = ATA_CMD_SET_MAX_EXT;
948 tf.flags |= ATA_TFLAG_LBA48;
949
950 tf.hob_lbal = (new_sectors >> 24) & 0xff;
951 tf.hob_lbam = (new_sectors >> 32) & 0xff;
952 tf.hob_lbah = (new_sectors >> 40) & 0xff;
953 } else
954 tf.command = ATA_CMD_SET_MAX;
955
1e999736 956 tf.protocol |= ATA_PROT_NODATA;
c728a914 957 tf.device |= ATA_LBA;
1e999736
AC
958
959 tf.lbal = (new_sectors >> 0) & 0xff;
960 tf.lbam = (new_sectors >> 8) & 0xff;
961 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 962
c728a914
TH
963 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
964 if (err_mask) {
965 ata_dev_printk(dev, KERN_WARNING, "failed to set "
966 "max address (err_mask=0x%x)\n", err_mask);
967 if (err_mask == AC_ERR_DEV &&
968 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
969 return -EACCES;
970 return -EIO;
971 }
972
c728a914 973 return 0;
1e999736
AC
974}
975
976/**
977 * ata_hpa_resize - Resize a device with an HPA set
978 * @dev: Device to resize
979 *
980 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
981 * it if required to the full size of the media. The caller must check
982 * the drive has the HPA feature set enabled.
05027adc
TH
983 *
984 * RETURNS:
985 * 0 on success, -errno on failure.
1e999736 986 */
05027adc 987static int ata_hpa_resize(struct ata_device *dev)
1e999736 988{
05027adc
TH
989 struct ata_eh_context *ehc = &dev->link->eh_context;
990 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
991 u64 sectors = ata_id_n_sectors(dev->id);
992 u64 native_sectors;
c728a914 993 int rc;
a617c09f 994
05027adc
TH
995 /* do we need to do it? */
996 if (dev->class != ATA_DEV_ATA ||
997 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
998 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 999 return 0;
1e999736 1000
05027adc
TH
1001 /* read native max address */
1002 rc = ata_read_native_max_address(dev, &native_sectors);
1003 if (rc) {
1004 /* If HPA isn't going to be unlocked, skip HPA
1005 * resizing from the next try.
1006 */
1007 if (!ata_ignore_hpa) {
1008 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1009 "broken, will skip HPA handling\n");
1010 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1011
1012 /* we can continue if device aborted the command */
1013 if (rc == -EACCES)
1014 rc = 0;
1e999736 1015 }
37301a55 1016
05027adc
TH
1017 return rc;
1018 }
1019
1020 /* nothing to do? */
1021 if (native_sectors <= sectors || !ata_ignore_hpa) {
1022 if (!print_info || native_sectors == sectors)
1023 return 0;
1024
1025 if (native_sectors > sectors)
1026 ata_dev_printk(dev, KERN_INFO,
1027 "HPA detected: current %llu, native %llu\n",
1028 (unsigned long long)sectors,
1029 (unsigned long long)native_sectors);
1030 else if (native_sectors < sectors)
1031 ata_dev_printk(dev, KERN_WARNING,
1032 "native sectors (%llu) is smaller than "
1033 "sectors (%llu)\n",
1034 (unsigned long long)native_sectors,
1035 (unsigned long long)sectors);
1036 return 0;
1037 }
1038
1039 /* let's unlock HPA */
1040 rc = ata_set_max_sectors(dev, native_sectors);
1041 if (rc == -EACCES) {
1042 /* if device aborted the command, skip HPA resizing */
1043 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1044 "(%llu -> %llu), skipping HPA handling\n",
1045 (unsigned long long)sectors,
1046 (unsigned long long)native_sectors);
1047 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1048 return 0;
1049 } else if (rc)
1050 return rc;
1051
1052 /* re-read IDENTIFY data */
1053 rc = ata_dev_reread_id(dev, 0);
1054 if (rc) {
1055 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1056 "data after HPA resizing\n");
1057 return rc;
1058 }
1059
1060 if (print_info) {
1061 u64 new_sectors = ata_id_n_sectors(dev->id);
1062 ata_dev_printk(dev, KERN_INFO,
1063 "HPA unlocked: %llu -> %llu, native %llu\n",
1064 (unsigned long long)sectors,
1065 (unsigned long long)new_sectors,
1066 (unsigned long long)native_sectors);
1067 }
1068
1069 return 0;
1e999736
AC
1070}
1071
10305f0f
A
1072/**
1073 * ata_id_to_dma_mode - Identify DMA mode from id block
1074 * @dev: device to identify
cc261267 1075 * @unknown: mode to assume if we cannot tell
10305f0f
A
1076 *
1077 * Set up the timing values for the device based upon the identify
1078 * reported values for the DMA mode. This function is used by drivers
1079 * which rely upon firmware configured modes, but wish to report the
1080 * mode correctly when possible.
1081 *
1082 * In addition we emit similarly formatted messages to the default
1083 * ata_dev_set_mode handler, in order to provide consistency of
1084 * presentation.
1085 */
1086
1087void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1088{
1089 unsigned int mask;
1090 u8 mode;
1091
1092 /* Pack the DMA modes */
1093 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1094 if (dev->id[53] & 0x04)
1095 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1096
1097 /* Select the mode in use */
1098 mode = ata_xfer_mask2mode(mask);
1099
1100 if (mode != 0) {
1101 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1102 ata_mode_string(mask));
1103 } else {
1104 /* SWDMA perhaps ? */
1105 mode = unknown;
1106 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1107 }
1108
1109 /* Configure the device reporting */
1110 dev->xfer_mode = mode;
1111 dev->xfer_shift = ata_xfer_mode2shift(mode);
1112}
1113
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Intentionally empty: for controllers that need no device
 *	selection.  May be used as the dev_select() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
1129
0baab86b 1130
1da177e4
LT
1131/**
1132 * ata_std_dev_select - Select device 0/1 on ATA bus
1133 * @ap: ATA channel to manipulate
1134 * @device: ATA device (numbered from zero) to select
1135 *
1136 * Use the method defined in the ATA specification to
1137 * make either device 0, or device 1, active on the
0baab86b
EF
1138 * ATA channel. Works with both PIO and MMIO.
1139 *
1140 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1141 *
1142 * LOCKING:
1143 * caller.
1144 */
1145
1146void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1147{
1148 u8 tmp;
1149
1150 if (device == 0)
1151 tmp = ATA_DEVICE_OBS;
1152 else
1153 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1154
0d5ff566 1155 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1156 ata_pause(ap); /* needed; also flushes, for mmio */
1157}
1158
1159/**
1160 * ata_dev_select - Select device 0/1 on ATA bus
1161 * @ap: ATA channel to manipulate
1162 * @device: ATA device (numbered from zero) to select
1163 * @wait: non-zero to wait for Status register BSY bit to clear
1164 * @can_sleep: non-zero if context allows sleeping
1165 *
1166 * Use the method defined in the ATA specification to
1167 * make either device 0, or device 1, active on the
1168 * ATA channel.
1169 *
1170 * This is a high-level version of ata_std_dev_select(),
1171 * which additionally provides the services of inserting
1172 * the proper pauses and status polling, where needed.
1173 *
1174 * LOCKING:
1175 * caller.
1176 */
1177
1178void ata_dev_select(struct ata_port *ap, unsigned int device,
1179 unsigned int wait, unsigned int can_sleep)
1180{
88574551 1181 if (ata_msg_probe(ap))
44877b4e
TH
1182 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1183 "device %u, wait %u\n", device, wait);
1da177e4
LT
1184
1185 if (wait)
1186 ata_wait_idle(ap);
1187
1188 ap->ops->dev_select(ap, device);
1189
1190 if (wait) {
9af5c9c9 1191 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1192 msleep(150);
1193 ata_wait_idle(ap);
1194 }
1195}
1196
1197/**
1198 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1199 * @id: IDENTIFY DEVICE page to dump
1da177e4 1200 *
0bd3300a
TH
1201 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1202 * page.
1da177e4
LT
1203 *
1204 * LOCKING:
1205 * caller.
1206 */
1207
0bd3300a 1208static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1209{
1210 DPRINTK("49==0x%04x "
1211 "53==0x%04x "
1212 "63==0x%04x "
1213 "64==0x%04x "
1214 "75==0x%04x \n",
0bd3300a
TH
1215 id[49],
1216 id[53],
1217 id[63],
1218 id[64],
1219 id[75]);
1da177e4
LT
1220 DPRINTK("80==0x%04x "
1221 "81==0x%04x "
1222 "82==0x%04x "
1223 "83==0x%04x "
1224 "84==0x%04x \n",
0bd3300a
TH
1225 id[80],
1226 id[81],
1227 id[82],
1228 id[83],
1229 id[84]);
1da177e4
LT
1230 DPRINTK("88==0x%04x "
1231 "93==0x%04x\n",
0bd3300a
TH
1232 id[88],
1233 id[93]);
1da177e4
LT
1234}
1235
cb95d562
TH
1236/**
1237 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1238 * @id: IDENTIFY data to compute xfer mask from
1239 *
1240 * Compute the xfermask for this device. This is not as trivial
1241 * as it seems if we must consider early devices correctly.
1242 *
1243 * FIXME: pre IDE drive timing (do we care ?).
1244 *
1245 * LOCKING:
1246 * None.
1247 *
1248 * RETURNS:
1249 * Computed xfermask
1250 */
1251static unsigned int ata_id_xfermask(const u16 *id)
1252{
1253 unsigned int pio_mask, mwdma_mask, udma_mask;
1254
1255 /* Usual case. Word 53 indicates word 64 is valid */
1256 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1257 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1258 pio_mask <<= 3;
1259 pio_mask |= 0x7;
1260 } else {
1261 /* If word 64 isn't valid then Word 51 high byte holds
1262 * the PIO timing number for the maximum. Turn it into
1263 * a mask.
1264 */
7a0f1c8a 1265 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1266 if (mode < 5) /* Valid PIO range */
1267 pio_mask = (2 << mode) - 1;
1268 else
1269 pio_mask = 1;
cb95d562
TH
1270
1271 /* But wait.. there's more. Design your standards by
1272 * committee and you too can get a free iordy field to
1273 * process. However its the speeds not the modes that
1274 * are supported... Note drivers using the timing API
1275 * will get this right anyway
1276 */
1277 }
1278
1279 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1280
b352e57d
AC
1281 if (ata_id_is_cfa(id)) {
1282 /*
1283 * Process compact flash extended modes
1284 */
1285 int pio = id[163] & 0x7;
1286 int dma = (id[163] >> 3) & 7;
1287
1288 if (pio)
1289 pio_mask |= (1 << 5);
1290 if (pio > 1)
1291 pio_mask |= (1 << 6);
1292 if (dma)
1293 mwdma_mask |= (1 << 3);
1294 if (dma > 1)
1295 mwdma_mask |= (1 << 4);
1296 }
1297
fb21f0d0
TH
1298 udma_mask = 0;
1299 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1300 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1301
1302 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1303}
1304
86e45b6b
TH
1305/**
1306 * ata_port_queue_task - Queue port_task
1307 * @ap: The ata_port to queue port_task for
e2a7f77a 1308 * @fn: workqueue function to be scheduled
65f27f38 1309 * @data: data for @fn to use
e2a7f77a 1310 * @delay: delay time for workqueue function
86e45b6b
TH
1311 *
1312 * Schedule @fn(@data) for execution after @delay jiffies using
1313 * port_task. There is one port_task per port and it's the
1314 * user(low level driver)'s responsibility to make sure that only
1315 * one task is active at any given time.
1316 *
1317 * libata core layer takes care of synchronization between
1318 * port_task and EH. ata_port_queue_task() may be ignored for EH
1319 * synchronization.
1320 *
1321 * LOCKING:
1322 * Inherited from caller.
1323 */
65f27f38 1324void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1325 unsigned long delay)
1326{
65f27f38
DH
1327 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1328 ap->port_task_data = data;
86e45b6b 1329
45a66c1c
ON
1330 /* may fail if ata_port_flush_task() in progress */
1331 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1332}
1333
1334/**
1335 * ata_port_flush_task - Flush port_task
1336 * @ap: The ata_port to flush port_task for
1337 *
1338 * After this function completes, port_task is guranteed not to
1339 * be running or scheduled.
1340 *
1341 * LOCKING:
1342 * Kernel thread context (may sleep)
1343 */
1344void ata_port_flush_task(struct ata_port *ap)
1345{
86e45b6b
TH
1346 DPRINTK("ENTER\n");
1347
45a66c1c 1348 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1349
0dd4b21f
BP
1350 if (ata_msg_ctl(ap))
1351 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1352}
1353
7102d230 1354static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1355{
77853bf2 1356 struct completion *waiting = qc->private_data;
a2a7a662 1357
a2a7a662 1358 complete(waiting);
a2a7a662
TH
1359}
1360
1361/**
2432697b 1362 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1363 * @dev: Device to which the command is sent
1364 * @tf: Taskfile registers for the command and the result
d69cf37d 1365 * @cdb: CDB for packet command
a2a7a662 1366 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1367 * @sg: sg list for the data buffer of the command
1368 * @n_elem: Number of sg entries
a2a7a662
TH
1369 *
1370 * Executes libata internal command with timeout. @tf contains
1371 * command on entry and result on return. Timeout and error
1372 * conditions are reported via return value. No recovery action
1373 * is taken after a command times out. It's caller's duty to
1374 * clean up after timeout.
1375 *
1376 * LOCKING:
1377 * None. Should be called with kernel context, might sleep.
551e8889
TH
1378 *
1379 * RETURNS:
1380 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1381 */
2432697b
TH
1382unsigned ata_exec_internal_sg(struct ata_device *dev,
1383 struct ata_taskfile *tf, const u8 *cdb,
1384 int dma_dir, struct scatterlist *sg,
1385 unsigned int n_elem)
a2a7a662 1386{
9af5c9c9
TH
1387 struct ata_link *link = dev->link;
1388 struct ata_port *ap = link->ap;
a2a7a662
TH
1389 u8 command = tf->command;
1390 struct ata_queued_cmd *qc;
2ab7db1f 1391 unsigned int tag, preempted_tag;
dedaf2b0 1392 u32 preempted_sactive, preempted_qc_active;
da917d69 1393 int preempted_nr_active_links;
60be6b9a 1394 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1395 unsigned long flags;
77853bf2 1396 unsigned int err_mask;
d95a717f 1397 int rc;
a2a7a662 1398
ba6a1308 1399 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1400
e3180499 1401 /* no internal command while frozen */
b51e9e5d 1402 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1403 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1404 return AC_ERR_SYSTEM;
1405 }
1406
2ab7db1f 1407 /* initialize internal qc */
a2a7a662 1408
2ab7db1f
TH
1409 /* XXX: Tag 0 is used for drivers with legacy EH as some
1410 * drivers choke if any other tag is given. This breaks
1411 * ata_tag_internal() test for those drivers. Don't use new
1412 * EH stuff without converting to it.
1413 */
1414 if (ap->ops->error_handler)
1415 tag = ATA_TAG_INTERNAL;
1416 else
1417 tag = 0;
1418
6cec4a39 1419 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1420 BUG();
f69499f4 1421 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1422
1423 qc->tag = tag;
1424 qc->scsicmd = NULL;
1425 qc->ap = ap;
1426 qc->dev = dev;
1427 ata_qc_reinit(qc);
1428
9af5c9c9
TH
1429 preempted_tag = link->active_tag;
1430 preempted_sactive = link->sactive;
dedaf2b0 1431 preempted_qc_active = ap->qc_active;
da917d69 1432 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1433 link->active_tag = ATA_TAG_POISON;
1434 link->sactive = 0;
dedaf2b0 1435 ap->qc_active = 0;
da917d69 1436 ap->nr_active_links = 0;
2ab7db1f
TH
1437
1438 /* prepare & issue qc */
a2a7a662 1439 qc->tf = *tf;
d69cf37d
TH
1440 if (cdb)
1441 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1442 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1443 qc->dma_dir = dma_dir;
1444 if (dma_dir != DMA_NONE) {
2432697b
TH
1445 unsigned int i, buflen = 0;
1446
1447 for (i = 0; i < n_elem; i++)
1448 buflen += sg[i].length;
1449
1450 ata_sg_init(qc, sg, n_elem);
49c80429 1451 qc->nbytes = buflen;
a2a7a662
TH
1452 }
1453
77853bf2 1454 qc->private_data = &wait;
a2a7a662
TH
1455 qc->complete_fn = ata_qc_complete_internal;
1456
8e0e694a 1457 ata_qc_issue(qc);
a2a7a662 1458
ba6a1308 1459 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1460
a8601e5f 1461 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
d95a717f
TH
1462
1463 ata_port_flush_task(ap);
41ade50c 1464
d95a717f 1465 if (!rc) {
ba6a1308 1466 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1467
1468 /* We're racing with irq here. If we lose, the
1469 * following test prevents us from completing the qc
d95a717f
TH
1470 * twice. If we win, the port is frozen and will be
1471 * cleaned up by ->post_internal_cmd().
a2a7a662 1472 */
77853bf2 1473 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1474 qc->err_mask |= AC_ERR_TIMEOUT;
1475
1476 if (ap->ops->error_handler)
1477 ata_port_freeze(ap);
1478 else
1479 ata_qc_complete(qc);
f15a1daf 1480
0dd4b21f
BP
1481 if (ata_msg_warn(ap))
1482 ata_dev_printk(dev, KERN_WARNING,
88574551 1483 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1484 }
1485
ba6a1308 1486 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1487 }
1488
d95a717f
TH
1489 /* do post_internal_cmd */
1490 if (ap->ops->post_internal_cmd)
1491 ap->ops->post_internal_cmd(qc);
1492
a51d644a
TH
1493 /* perform minimal error analysis */
1494 if (qc->flags & ATA_QCFLAG_FAILED) {
1495 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1496 qc->err_mask |= AC_ERR_DEV;
1497
1498 if (!qc->err_mask)
1499 qc->err_mask |= AC_ERR_OTHER;
1500
1501 if (qc->err_mask & ~AC_ERR_OTHER)
1502 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1503 }
1504
15869303 1505 /* finish up */
ba6a1308 1506 spin_lock_irqsave(ap->lock, flags);
15869303 1507
e61e0672 1508 *tf = qc->result_tf;
77853bf2
TH
1509 err_mask = qc->err_mask;
1510
1511 ata_qc_free(qc);
9af5c9c9
TH
1512 link->active_tag = preempted_tag;
1513 link->sactive = preempted_sactive;
dedaf2b0 1514 ap->qc_active = preempted_qc_active;
da917d69 1515 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1516
1f7dd3e9
TH
1517 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1518 * Until those drivers are fixed, we detect the condition
1519 * here, fail the command with AC_ERR_SYSTEM and reenable the
1520 * port.
1521 *
1522 * Note that this doesn't change any behavior as internal
1523 * command failure results in disabling the device in the
1524 * higher layer for LLDDs without new reset/EH callbacks.
1525 *
1526 * Kill the following code as soon as those drivers are fixed.
1527 */
198e0fed 1528 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1529 err_mask |= AC_ERR_SYSTEM;
1530 ata_port_probe(ap);
1531 }
1532
ba6a1308 1533 spin_unlock_irqrestore(ap->lock, flags);
15869303 1534
77853bf2 1535 return err_mask;
a2a7a662
TH
1536}
1537
2432697b 1538/**
33480a0e 1539 * ata_exec_internal - execute libata internal command
2432697b
TH
1540 * @dev: Device to which the command is sent
1541 * @tf: Taskfile registers for the command and the result
1542 * @cdb: CDB for packet command
1543 * @dma_dir: Data tranfer direction of the command
1544 * @buf: Data buffer of the command
1545 * @buflen: Length of data buffer
1546 *
1547 * Wrapper around ata_exec_internal_sg() which takes simple
1548 * buffer instead of sg list.
1549 *
1550 * LOCKING:
1551 * None. Should be called with kernel context, might sleep.
1552 *
1553 * RETURNS:
1554 * Zero on success, AC_ERR_* mask on failure
1555 */
1556unsigned ata_exec_internal(struct ata_device *dev,
1557 struct ata_taskfile *tf, const u8 *cdb,
1558 int dma_dir, void *buf, unsigned int buflen)
1559{
33480a0e
TH
1560 struct scatterlist *psg = NULL, sg;
1561 unsigned int n_elem = 0;
2432697b 1562
33480a0e
TH
1563 if (dma_dir != DMA_NONE) {
1564 WARN_ON(!buf);
1565 sg_init_one(&sg, buf, buflen);
1566 psg = &sg;
1567 n_elem++;
1568 }
2432697b 1569
33480a0e 1570 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1571}
1572
977e6b9f
TH
1573/**
1574 * ata_do_simple_cmd - execute simple internal command
1575 * @dev: Device to which the command is sent
1576 * @cmd: Opcode to execute
1577 *
1578 * Execute a 'simple' command, that only consists of the opcode
1579 * 'cmd' itself, without filling any other registers
1580 *
1581 * LOCKING:
1582 * Kernel thread context (may sleep).
1583 *
1584 * RETURNS:
1585 * Zero on success, AC_ERR_* mask on failure
e58eb583 1586 */
77b08fb5 1587unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1588{
1589 struct ata_taskfile tf;
e58eb583
TH
1590
1591 ata_tf_init(dev, &tf);
1592
1593 tf.command = cmd;
1594 tf.flags |= ATA_TFLAG_DEVICE;
1595 tf.protocol = ATA_PROT_NODATA;
1596
977e6b9f 1597 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1598}
1599
1bc4ccff
AC
1600/**
1601 * ata_pio_need_iordy - check if iordy needed
1602 * @adev: ATA device
1603 *
1604 * Check if the current speed of the device requires IORDY. Used
1605 * by various controllers for chip configuration.
1606 */
a617c09f 1607
1bc4ccff
AC
1608unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1609{
432729f0
AC
1610 /* Controller doesn't support IORDY. Probably a pointless check
1611 as the caller should know this */
9af5c9c9 1612 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1613 return 0;
432729f0
AC
1614 /* PIO3 and higher it is mandatory */
1615 if (adev->pio_mode > XFER_PIO_2)
1616 return 1;
1617 /* We turn it on when possible */
1618 if (ata_id_has_iordy(adev->id))
1bc4ccff 1619 return 1;
432729f0
AC
1620 return 0;
1621}
2e9edbf8 1622
432729f0
AC
1623/**
1624 * ata_pio_mask_no_iordy - Return the non IORDY mask
1625 * @adev: ATA device
1626 *
1627 * Compute the highest mode possible if we are not using iordy. Return
1628 * -1 if no iordy mode is available.
1629 */
a617c09f 1630
432729f0
AC
1631static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1632{
1bc4ccff 1633 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1634 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1635 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1636 /* Is the speed faster than the drive allows non IORDY ? */
1637 if (pio) {
1638 /* This is cycle times not frequency - watch the logic! */
1639 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1640 return 3 << ATA_SHIFT_PIO;
1641 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1642 }
1643 }
432729f0 1644 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1645}
1646
1da177e4 1647/**
49016aca 1648 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1649 * @dev: target device
1650 * @p_class: pointer to class of the target device (may be changed)
bff04647 1651 * @flags: ATA_READID_* flags
fe635c7e 1652 * @id: buffer to read IDENTIFY data into
1da177e4 1653 *
49016aca
TH
1654 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1655 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1656 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1657 * for pre-ATA4 drives.
1da177e4 1658 *
50a99018
AC
1659 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1660 * now we abort if we hit that case.
1661 *
1da177e4 1662 * LOCKING:
49016aca
TH
1663 * Kernel thread context (may sleep)
1664 *
1665 * RETURNS:
1666 * 0 on success, -errno otherwise.
1da177e4 1667 */
a9beec95 1668int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1669 unsigned int flags, u16 *id)
1da177e4 1670{
9af5c9c9 1671 struct ata_port *ap = dev->link->ap;
49016aca 1672 unsigned int class = *p_class;
a0123703 1673 struct ata_taskfile tf;
49016aca
TH
1674 unsigned int err_mask = 0;
1675 const char *reason;
54936f8b 1676 int may_fallback = 1, tried_spinup = 0;
49016aca 1677 int rc;
1da177e4 1678
0dd4b21f 1679 if (ata_msg_ctl(ap))
44877b4e 1680 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1681
49016aca 1682 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1683 retry:
3373efd8 1684 ata_tf_init(dev, &tf);
a0123703 1685
49016aca
TH
1686 switch (class) {
1687 case ATA_DEV_ATA:
a0123703 1688 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1689 break;
1690 case ATA_DEV_ATAPI:
a0123703 1691 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1692 break;
1693 default:
1694 rc = -ENODEV;
1695 reason = "unsupported class";
1696 goto err_out;
1da177e4
LT
1697 }
1698
a0123703 1699 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1700
1701 /* Some devices choke if TF registers contain garbage. Make
1702 * sure those are properly initialized.
1703 */
1704 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1705
1706 /* Device presence detection is unreliable on some
1707 * controllers. Always poll IDENTIFY if available.
1708 */
1709 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1710
3373efd8 1711 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
49016aca 1712 id, sizeof(id[0]) * ATA_ID_WORDS);
a0123703 1713 if (err_mask) {
800b3996 1714 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1715 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1716 ap->print_id, dev->devno);
55a8e2c8
TH
1717 return -ENOENT;
1718 }
1719
54936f8b
TH
1720 /* Device or controller might have reported the wrong
1721 * device class. Give a shot at the other IDENTIFY if
1722 * the current one is aborted by the device.
1723 */
1724 if (may_fallback &&
1725 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1726 may_fallback = 0;
1727
1728 if (class == ATA_DEV_ATA)
1729 class = ATA_DEV_ATAPI;
1730 else
1731 class = ATA_DEV_ATA;
1732 goto retry;
1733 }
1734
49016aca
TH
1735 rc = -EIO;
1736 reason = "I/O error";
1da177e4
LT
1737 goto err_out;
1738 }
1739
54936f8b
TH
1740 /* Falling back doesn't make sense if ID data was read
1741 * successfully at least once.
1742 */
1743 may_fallback = 0;
1744
49016aca 1745 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1746
49016aca 1747 /* sanity check */
a4f5749b 1748 rc = -EINVAL;
6070068b 1749 reason = "device reports invalid type";
a4f5749b
TH
1750
1751 if (class == ATA_DEV_ATA) {
1752 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1753 goto err_out;
1754 } else {
1755 if (ata_id_is_ata(id))
1756 goto err_out;
49016aca
TH
1757 }
1758
169439c2
ML
1759 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1760 tried_spinup = 1;
1761 /*
1762 * Drive powered-up in standby mode, and requires a specific
1763 * SET_FEATURES spin-up subcommand before it will accept
1764 * anything other than the original IDENTIFY command.
1765 */
1766 ata_tf_init(dev, &tf);
1767 tf.command = ATA_CMD_SET_FEATURES;
1768 tf.feature = SETFEATURES_SPINUP;
1769 tf.protocol = ATA_PROT_NODATA;
1770 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1771 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
fb0582f9 1772 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1773 rc = -EIO;
1774 reason = "SPINUP failed";
1775 goto err_out;
1776 }
1777 /*
1778 * If the drive initially returned incomplete IDENTIFY info,
1779 * we now must reissue the IDENTIFY command.
1780 */
1781 if (id[2] == 0x37c8)
1782 goto retry;
1783 }
1784
bff04647 1785 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1786 /*
1787 * The exact sequence expected by certain pre-ATA4 drives is:
1788 * SRST RESET
50a99018
AC
1789 * IDENTIFY (optional in early ATA)
1790 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1791 * anything else..
1792 * Some drives were very specific about that exact sequence.
50a99018
AC
1793 *
1794 * Note that ATA4 says lba is mandatory so the second check
1795 * shoud never trigger.
49016aca
TH
1796 */
1797 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1798 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1799 if (err_mask) {
1800 rc = -EIO;
1801 reason = "INIT_DEV_PARAMS failed";
1802 goto err_out;
1803 }
1804
1805 /* current CHS translation info (id[53-58]) might be
1806 * changed. reread the identify device info.
1807 */
bff04647 1808 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1809 goto retry;
1810 }
1811 }
1812
1813 *p_class = class;
fe635c7e 1814
49016aca
TH
1815 return 0;
1816
1817 err_out:
88574551 1818 if (ata_msg_warn(ap))
0dd4b21f 1819 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1820 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
1821 return rc;
1822}
1823
3373efd8 1824static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1825{
9af5c9c9
TH
1826 struct ata_port *ap = dev->link->ap;
1827 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1828}
1829
a6e6ce8e
TH
1830static void ata_dev_config_ncq(struct ata_device *dev,
1831 char *desc, size_t desc_sz)
1832{
9af5c9c9 1833 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1834 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1835
1836 if (!ata_id_has_ncq(dev->id)) {
1837 desc[0] = '\0';
1838 return;
1839 }
75683fe7 1840 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1841 snprintf(desc, desc_sz, "NCQ (not used)");
1842 return;
1843 }
a6e6ce8e 1844 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1845 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1846 dev->flags |= ATA_DFLAG_NCQ;
1847 }
1848
1849 if (hdepth >= ddepth)
1850 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1851 else
1852 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1853}
1854
49016aca 1855/**
ffeae418 1856 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1857 * @dev: Target device to configure
1858 *
1859 * Configure @dev according to @dev->id. Generic and low-level
1860 * driver specific fixups are also applied.
49016aca
TH
1861 *
1862 * LOCKING:
ffeae418
TH
1863 * Kernel thread context (may sleep)
1864 *
1865 * RETURNS:
1866 * 0 on success, -errno otherwise
49016aca 1867 */
efdaedc4 1868int ata_dev_configure(struct ata_device *dev)
49016aca 1869{
9af5c9c9
TH
1870 struct ata_port *ap = dev->link->ap;
1871 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1872 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1873 const u16 *id = dev->id;
ff8854b2 1874 unsigned int xfer_mask;
b352e57d 1875 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1876 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1877 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1878 int rc;
49016aca 1879
0dd4b21f 1880 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1881 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1882 __FUNCTION__);
ffeae418 1883 return 0;
49016aca
TH
1884 }
1885
0dd4b21f 1886 if (ata_msg_probe(ap))
44877b4e 1887 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1888
75683fe7
TH
1889 /* set horkage */
1890 dev->horkage |= ata_dev_blacklisted(dev);
1891
6746544c
TH
1892 /* let ACPI work its magic */
1893 rc = ata_acpi_on_devcfg(dev);
1894 if (rc)
1895 return rc;
08573a86 1896
05027adc
TH
1897 /* massage HPA, do it early as it might change IDENTIFY data */
1898 rc = ata_hpa_resize(dev);
1899 if (rc)
1900 return rc;
1901
c39f5ebe 1902 /* print device capabilities */
0dd4b21f 1903 if (ata_msg_probe(ap))
88574551
TH
1904 ata_dev_printk(dev, KERN_DEBUG,
1905 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1906 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1907 __FUNCTION__,
f15a1daf
TH
1908 id[49], id[82], id[83], id[84],
1909 id[85], id[86], id[87], id[88]);
c39f5ebe 1910
208a9933 1911 /* initialize to-be-configured parameters */
ea1dd4e1 1912 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1913 dev->max_sectors = 0;
1914 dev->cdb_len = 0;
1915 dev->n_sectors = 0;
1916 dev->cylinders = 0;
1917 dev->heads = 0;
1918 dev->sectors = 0;
1919
1da177e4
LT
1920 /*
1921 * common ATA, ATAPI feature tests
1922 */
1923
ff8854b2 1924 /* find max transfer mode; for printk only */
1148c3a7 1925 xfer_mask = ata_id_xfermask(id);
1da177e4 1926
0dd4b21f
BP
1927 if (ata_msg_probe(ap))
1928 ata_dump_id(id);
1da177e4 1929
ef143d57
AL
1930 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1931 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1932 sizeof(fwrevbuf));
1933
1934 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1935 sizeof(modelbuf));
1936
1da177e4
LT
1937 /* ATA-specific feature tests */
1938 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1939 if (ata_id_is_cfa(id)) {
1940 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1941 ata_dev_printk(dev, KERN_WARNING,
1942 "supports DRM functions and may "
1943 "not be fully accessable.\n");
b352e57d
AC
1944 snprintf(revbuf, 7, "CFA");
1945 }
1946 else
1947 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1948
1148c3a7 1949 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1950
3f64f565
EM
1951 if (dev->id[59] & 0x100)
1952 dev->multi_count = dev->id[59] & 0xff;
1953
1148c3a7 1954 if (ata_id_has_lba(id)) {
4c2d721a 1955 const char *lba_desc;
a6e6ce8e 1956 char ncq_desc[20];
8bf62ece 1957
4c2d721a
TH
1958 lba_desc = "LBA";
1959 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1960 if (ata_id_has_lba48(id)) {
8bf62ece 1961 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1962 lba_desc = "LBA48";
6fc49adb
TH
1963
1964 if (dev->n_sectors >= (1UL << 28) &&
1965 ata_id_has_flush_ext(id))
1966 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1967 }
8bf62ece 1968
a6e6ce8e
TH
1969 /* config NCQ */
1970 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1971
8bf62ece 1972 /* print device info to dmesg */
3f64f565
EM
1973 if (ata_msg_drv(ap) && print_info) {
1974 ata_dev_printk(dev, KERN_INFO,
1975 "%s: %s, %s, max %s\n",
1976 revbuf, modelbuf, fwrevbuf,
1977 ata_mode_string(xfer_mask));
1978 ata_dev_printk(dev, KERN_INFO,
1979 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1980 (unsigned long long)dev->n_sectors,
3f64f565
EM
1981 dev->multi_count, lba_desc, ncq_desc);
1982 }
ffeae418 1983 } else {
8bf62ece
AL
1984 /* CHS */
1985
1986 /* Default translation */
1148c3a7
TH
1987 dev->cylinders = id[1];
1988 dev->heads = id[3];
1989 dev->sectors = id[6];
8bf62ece 1990
1148c3a7 1991 if (ata_id_current_chs_valid(id)) {
8bf62ece 1992 /* Current CHS translation is valid. */
1148c3a7
TH
1993 dev->cylinders = id[54];
1994 dev->heads = id[55];
1995 dev->sectors = id[56];
8bf62ece
AL
1996 }
1997
1998 /* print device info to dmesg */
3f64f565 1999 if (ata_msg_drv(ap) && print_info) {
88574551 2000 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2001 "%s: %s, %s, max %s\n",
2002 revbuf, modelbuf, fwrevbuf,
2003 ata_mode_string(xfer_mask));
a84471fe 2004 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2005 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2006 (unsigned long long)dev->n_sectors,
2007 dev->multi_count, dev->cylinders,
2008 dev->heads, dev->sectors);
2009 }
07f6f7d0
AL
2010 }
2011
6e7846e9 2012 dev->cdb_len = 16;
1da177e4
LT
2013 }
2014
2015 /* ATAPI-specific feature tests */
2c13b7ce 2016 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2017 const char *cdb_intr_string = "";
2018 const char *atapi_an_string = "";
08a556db 2019
1148c3a7 2020 rc = atapi_cdb_len(id);
1da177e4 2021 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2022 if (ata_msg_warn(ap))
88574551
TH
2023 ata_dev_printk(dev, KERN_WARNING,
2024 "unsupported CDB len\n");
ffeae418 2025 rc = -EINVAL;
1da177e4
LT
2026 goto err_out_nosup;
2027 }
6e7846e9 2028 dev->cdb_len = (unsigned int) rc;
1da177e4 2029
9f45cbd3
KCA
2030 /*
2031 * check to see if this ATAPI device supports
2032 * Asynchronous Notification
2033 */
854c73a2
TH
2034 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id)) {
2035 unsigned int err_mask;
2036
9f45cbd3 2037 /* issue SET feature command to turn this on */
854c73a2
TH
2038 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2039 if (err_mask)
9f45cbd3 2040 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2041 "failed to enable ATAPI AN "
2042 "(err_mask=0x%x)\n", err_mask);
2043 else {
9f45cbd3 2044 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2045 atapi_an_string = ", ATAPI AN";
2046 }
9f45cbd3
KCA
2047 }
2048
08a556db 2049 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2050 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2051 cdb_intr_string = ", CDB intr";
2052 }
312f7da2 2053
1da177e4 2054 /* print device info to dmesg */
5afc8142 2055 if (ata_msg_drv(ap) && print_info)
ef143d57 2056 ata_dev_printk(dev, KERN_INFO,
854c73a2 2057 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2058 modelbuf, fwrevbuf,
12436c30 2059 ata_mode_string(xfer_mask),
854c73a2 2060 cdb_intr_string, atapi_an_string);
1da177e4
LT
2061 }
2062
914ed354
TH
2063 /* determine max_sectors */
2064 dev->max_sectors = ATA_MAX_SECTORS;
2065 if (dev->flags & ATA_DFLAG_LBA48)
2066 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2067
93590859
AC
2068 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2069 /* Let the user know. We don't want to disallow opens for
2070 rescue purposes, or in case the vendor is just a blithering
2071 idiot */
2072 if (print_info) {
2073 ata_dev_printk(dev, KERN_WARNING,
2074"Drive reports diagnostics failure. This may indicate a drive\n");
2075 ata_dev_printk(dev, KERN_WARNING,
2076"fault or invalid emulation. Contact drive vendor for information.\n");
2077 }
2078 }
2079
4b2f3ede 2080 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2081 if (ata_dev_knobble(dev)) {
5afc8142 2082 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2083 ata_dev_printk(dev, KERN_INFO,
2084 "applying bridge limits\n");
5a529139 2085 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2086 dev->max_sectors = ATA_MAX_SECTORS;
2087 }
2088
75683fe7 2089 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2090 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2091 dev->max_sectors);
18d6e9d5 2092
4b2f3ede 2093 if (ap->ops->dev_config)
cd0d3bbc 2094 ap->ops->dev_config(dev);
4b2f3ede 2095
0dd4b21f
BP
2096 if (ata_msg_probe(ap))
2097 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2098 __FUNCTION__, ata_chk_status(ap));
ffeae418 2099 return 0;
1da177e4
LT
2100
2101err_out_nosup:
0dd4b21f 2102 if (ata_msg_probe(ap))
88574551
TH
2103 ata_dev_printk(dev, KERN_DEBUG,
2104 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2105 return rc;
1da177e4
LT
2106}
2107
be0d18df 2108/**
2e41e8e6 2109 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2110 * @ap: port
2111 *
2e41e8e6 2112 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2113 * detection.
2114 */
2115
2116int ata_cable_40wire(struct ata_port *ap)
2117{
2118 return ATA_CBL_PATA40;
2119}
2120
2121/**
2e41e8e6 2122 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2123 * @ap: port
2124 *
2e41e8e6 2125 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2126 * detection.
2127 */
2128
2129int ata_cable_80wire(struct ata_port *ap)
2130{
2131 return ATA_CBL_PATA80;
2132}
2133
2134/**
2135 * ata_cable_unknown - return unknown PATA cable.
2136 * @ap: port
2137 *
2138 * Helper method for drivers which have no PATA cable detection.
2139 */
2140
2141int ata_cable_unknown(struct ata_port *ap)
2142{
2143 return ATA_CBL_PATA_UNK;
2144}
2145
2146/**
2147 * ata_cable_sata - return SATA cable type
2148 * @ap: port
2149 *
2150 * Helper method for drivers which have SATA cables
2151 */
2152
2153int ata_cable_sata(struct ata_port *ap)
2154{
2155 return ATA_CBL_SATA;
2156}
2157
1da177e4
LT
2158/**
2159 * ata_bus_probe - Reset and probe ATA bus
2160 * @ap: Bus to probe
2161 *
0cba632b
JG
2162 * Master ATA bus probing function. Initiates a hardware-dependent
2163 * bus reset, then attempts to identify any devices found on
2164 * the bus.
2165 *
1da177e4 2166 * LOCKING:
0cba632b 2167 * PCI/etc. bus probe sem.
1da177e4
LT
2168 *
2169 * RETURNS:
96072e69 2170 * Zero on success, negative errno otherwise.
1da177e4
LT
2171 */
2172
80289167 2173int ata_bus_probe(struct ata_port *ap)
1da177e4 2174{
28ca5c57 2175 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2176 int tries[ATA_MAX_DEVICES];
f58229f8 2177 int rc;
e82cbdb9 2178 struct ata_device *dev;
1da177e4 2179
28ca5c57 2180 ata_port_probe(ap);
c19ba8af 2181
f58229f8
TH
2182 ata_link_for_each_dev(dev, &ap->link)
2183 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2184
2185 retry:
2044470c 2186 /* reset and determine device classes */
52783c5d 2187 ap->ops->phy_reset(ap);
2061a47a 2188
f58229f8 2189 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2190 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2191 dev->class != ATA_DEV_UNKNOWN)
2192 classes[dev->devno] = dev->class;
2193 else
2194 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2195
52783c5d 2196 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2197 }
1da177e4 2198
52783c5d 2199 ata_port_probe(ap);
2044470c 2200
b6079ca4
AC
2201 /* after the reset the device state is PIO 0 and the controller
2202 state is undefined. Record the mode */
2203
f58229f8
TH
2204 ata_link_for_each_dev(dev, &ap->link)
2205 dev->pio_mode = XFER_PIO_0;
b6079ca4 2206
f31f0cc2
JG
2207 /* read IDENTIFY page and configure devices. We have to do the identify
2208 specific sequence bass-ackwards so that PDIAG- is released by
2209 the slave device */
2210
f58229f8
TH
2211 ata_link_for_each_dev(dev, &ap->link) {
2212 if (tries[dev->devno])
2213 dev->class = classes[dev->devno];
ffeae418 2214
14d2bac1 2215 if (!ata_dev_enabled(dev))
ffeae418 2216 continue;
ffeae418 2217
bff04647
TH
2218 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2219 dev->id);
14d2bac1
TH
2220 if (rc)
2221 goto fail;
f31f0cc2
JG
2222 }
2223
be0d18df
AC
2224 /* Now ask for the cable type as PDIAG- should have been released */
2225 if (ap->ops->cable_detect)
2226 ap->cbl = ap->ops->cable_detect(ap);
2227
614fe29b
AC
2228 /* We may have SATA bridge glue hiding here irrespective of the
2229 reported cable types and sensed types */
2230 ata_link_for_each_dev(dev, &ap->link) {
2231 if (!ata_dev_enabled(dev))
2232 continue;
2233 /* SATA drives indicate we have a bridge. We don't know which
2234 end of the link the bridge is which is a problem */
2235 if (ata_id_is_sata(dev->id))
2236 ap->cbl = ATA_CBL_SATA;
2237 }
2238
f31f0cc2
JG
2239 /* After the identify sequence we can now set up the devices. We do
2240 this in the normal order so that the user doesn't get confused */
2241
f58229f8 2242 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2243 if (!ata_dev_enabled(dev))
2244 continue;
14d2bac1 2245
9af5c9c9 2246 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2247 rc = ata_dev_configure(dev);
9af5c9c9 2248 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2249 if (rc)
2250 goto fail;
1da177e4
LT
2251 }
2252
e82cbdb9 2253 /* configure transfer mode */
0260731f 2254 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2255 if (rc)
51713d35 2256 goto fail;
1da177e4 2257
f58229f8
TH
2258 ata_link_for_each_dev(dev, &ap->link)
2259 if (ata_dev_enabled(dev))
e82cbdb9 2260 return 0;
1da177e4 2261
e82cbdb9
TH
2262 /* no device present, disable port */
2263 ata_port_disable(ap);
96072e69 2264 return -ENODEV;
14d2bac1
TH
2265
2266 fail:
4ae72a1e
TH
2267 tries[dev->devno]--;
2268
14d2bac1
TH
2269 switch (rc) {
2270 case -EINVAL:
4ae72a1e 2271 /* eeek, something went very wrong, give up */
14d2bac1
TH
2272 tries[dev->devno] = 0;
2273 break;
4ae72a1e
TH
2274
2275 case -ENODEV:
2276 /* give it just one more chance */
2277 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2278 case -EIO:
4ae72a1e
TH
2279 if (tries[dev->devno] == 1) {
2280 /* This is the last chance, better to slow
2281 * down than lose it.
2282 */
936fd732 2283 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2284 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2285 }
14d2bac1
TH
2286 }
2287
4ae72a1e 2288 if (!tries[dev->devno])
3373efd8 2289 ata_dev_disable(dev);
ec573755 2290
14d2bac1 2291 goto retry;
1da177e4
LT
2292}
2293
2294/**
0cba632b
JG
2295 * ata_port_probe - Mark port as enabled
2296 * @ap: Port for which we indicate enablement
1da177e4 2297 *
0cba632b
JG
2298 * Modify @ap data structure such that the system
2299 * thinks that the entire port is enabled.
2300 *
cca3974e 2301 * LOCKING: host lock, or some other form of
0cba632b 2302 * serialization.
1da177e4
LT
2303 */
2304
2305void ata_port_probe(struct ata_port *ap)
2306{
198e0fed 2307 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2308}
2309
3be680b7
TH
2310/**
2311 * sata_print_link_status - Print SATA link status
936fd732 2312 * @link: SATA link to printk link status about
3be680b7
TH
2313 *
2314 * This function prints link speed and status of a SATA link.
2315 *
2316 * LOCKING:
2317 * None.
2318 */
936fd732 2319void sata_print_link_status(struct ata_link *link)
3be680b7 2320{
6d5f9732 2321 u32 sstatus, scontrol, tmp;
3be680b7 2322
936fd732 2323 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2324 return;
936fd732 2325 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2326
936fd732 2327 if (ata_link_online(link)) {
3be680b7 2328 tmp = (sstatus >> 4) & 0xf;
936fd732 2329 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2330 "SATA link up %s (SStatus %X SControl %X)\n",
2331 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2332 } else {
936fd732 2333 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2334 "SATA link down (SStatus %X SControl %X)\n",
2335 sstatus, scontrol);
3be680b7
TH
2336 }
2337}
2338
1da177e4 2339/**
780a87f7
JG
2340 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2341 * @ap: SATA port associated with target SATA PHY.
1da177e4 2342 *
780a87f7
JG
2343 * This function issues commands to standard SATA Sxxx
2344 * PHY registers, to wake up the phy (and device), and
2345 * clear any reset condition.
1da177e4
LT
2346 *
2347 * LOCKING:
0cba632b 2348 * PCI/etc. bus probe sem.
1da177e4
LT
2349 *
2350 */
2351void __sata_phy_reset(struct ata_port *ap)
2352{
936fd732 2353 struct ata_link *link = &ap->link;
1da177e4 2354 unsigned long timeout = jiffies + (HZ * 5);
936fd732 2355 u32 sstatus;
1da177e4
LT
2356
2357 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2358 /* issue phy wake/reset */
936fd732 2359 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
62ba2841
TH
2360 /* Couldn't find anything in SATA I/II specs, but
2361 * AHCI-1.1 10.4.2 says at least 1 ms. */
2362 mdelay(1);
1da177e4 2363 }
81952c54 2364 /* phy wake/clear reset */
936fd732 2365 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
1da177e4
LT
2366
2367 /* wait for phy to become ready, if necessary */
2368 do {
2369 msleep(200);
936fd732 2370 sata_scr_read(link, SCR_STATUS, &sstatus);
1da177e4
LT
2371 if ((sstatus & 0xf) != 1)
2372 break;
2373 } while (time_before(jiffies, timeout));
2374
3be680b7 2375 /* print link status */
936fd732 2376 sata_print_link_status(link);
656563e3 2377
3be680b7 2378 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 2379 if (!ata_link_offline(link))
1da177e4 2380 ata_port_probe(ap);
3be680b7 2381 else
1da177e4 2382 ata_port_disable(ap);
1da177e4 2383
198e0fed 2384 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2385 return;
2386
2387 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2388 ata_port_disable(ap);
2389 return;
2390 }
2391
2392 ap->cbl = ATA_CBL_SATA;
2393}
2394
2395/**
780a87f7
JG
2396 * sata_phy_reset - Reset SATA bus.
2397 * @ap: SATA port associated with target SATA PHY.
1da177e4 2398 *
780a87f7
JG
2399 * This function resets the SATA bus, and then probes
2400 * the bus for devices.
1da177e4
LT
2401 *
2402 * LOCKING:
0cba632b 2403 * PCI/etc. bus probe sem.
1da177e4
LT
2404 *
2405 */
2406void sata_phy_reset(struct ata_port *ap)
2407{
2408 __sata_phy_reset(ap);
198e0fed 2409 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2410 return;
2411 ata_bus_reset(ap);
2412}
2413
ebdfca6e
AC
2414/**
2415 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2416 * @adev: device
2417 *
2418 * Obtain the other device on the same cable, or if none is
2419 * present NULL is returned
2420 */
2e9edbf8 2421
3373efd8 2422struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2423{
9af5c9c9
TH
2424 struct ata_link *link = adev->link;
2425 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2426 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2427 return NULL;
2428 return pair;
2429}
2430
1da177e4 2431/**
780a87f7
JG
2432 * ata_port_disable - Disable port.
2433 * @ap: Port to be disabled.
1da177e4 2434 *
780a87f7
JG
2435 * Modify @ap data structure such that the system
2436 * thinks that the entire port is disabled, and should
2437 * never attempt to probe or communicate with devices
2438 * on this port.
2439 *
cca3974e 2440 * LOCKING: host lock, or some other form of
780a87f7 2441 * serialization.
1da177e4
LT
2442 */
2443
2444void ata_port_disable(struct ata_port *ap)
2445{
9af5c9c9
TH
2446 ap->link.device[0].class = ATA_DEV_NONE;
2447 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2448 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2449}
2450
1c3fae4d 2451/**
3c567b7d 2452 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2453 * @link: Link to adjust SATA spd limit for
1c3fae4d 2454 *
936fd732 2455 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2456 * function only adjusts the limit. The change must be applied
3c567b7d 2457 * using sata_set_spd().
1c3fae4d
TH
2458 *
2459 * LOCKING:
2460 * Inherited from caller.
2461 *
2462 * RETURNS:
2463 * 0 on success, negative errno on failure
2464 */
936fd732 2465int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2466{
81952c54
TH
2467 u32 sstatus, spd, mask;
2468 int rc, highbit;
1c3fae4d 2469
936fd732 2470 if (!sata_scr_valid(link))
008a7896
TH
2471 return -EOPNOTSUPP;
2472
2473 /* If SCR can be read, use it to determine the current SPD.
936fd732 2474 * If not, use cached value in link->sata_spd.
008a7896 2475 */
936fd732 2476 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2477 if (rc == 0)
2478 spd = (sstatus >> 4) & 0xf;
2479 else
936fd732 2480 spd = link->sata_spd;
1c3fae4d 2481
936fd732 2482 mask = link->sata_spd_limit;
1c3fae4d
TH
2483 if (mask <= 1)
2484 return -EINVAL;
008a7896
TH
2485
2486 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2487 highbit = fls(mask) - 1;
2488 mask &= ~(1 << highbit);
2489
008a7896
TH
2490 /* Mask off all speeds higher than or equal to the current
2491 * one. Force 1.5Gbps if current SPD is not available.
2492 */
2493 if (spd > 1)
2494 mask &= (1 << (spd - 1)) - 1;
2495 else
2496 mask &= 1;
2497
2498 /* were we already at the bottom? */
1c3fae4d
TH
2499 if (!mask)
2500 return -EINVAL;
2501
936fd732 2502 link->sata_spd_limit = mask;
1c3fae4d 2503
936fd732 2504 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2505 sata_spd_string(fls(mask)));
1c3fae4d
TH
2506
2507 return 0;
2508}
2509
936fd732 2510static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2511{
2512 u32 spd, limit;
2513
936fd732 2514 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2515 limit = 0;
2516 else
936fd732 2517 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2518
2519 spd = (*scontrol >> 4) & 0xf;
2520 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2521
2522 return spd != limit;
2523}
2524
2525/**
3c567b7d 2526 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2527 * @link: Link in question
1c3fae4d
TH
2528 *
2529 * Test whether the spd limit in SControl matches
936fd732 2530 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2531 * whether hardreset is necessary to apply SATA spd
2532 * configuration.
2533 *
2534 * LOCKING:
2535 * Inherited from caller.
2536 *
2537 * RETURNS:
2538 * 1 if SATA spd configuration is needed, 0 otherwise.
2539 */
936fd732 2540int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2541{
2542 u32 scontrol;
2543
936fd732 2544 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2545 return 0;
2546
936fd732 2547 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2548}
2549
2550/**
3c567b7d 2551 * sata_set_spd - set SATA spd according to spd limit
936fd732 2552 * @link: Link to set SATA spd for
1c3fae4d 2553 *
936fd732 2554 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2555 *
2556 * LOCKING:
2557 * Inherited from caller.
2558 *
2559 * RETURNS:
2560 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2561 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2562 */
936fd732 2563int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2564{
2565 u32 scontrol;
81952c54 2566 int rc;
1c3fae4d 2567
936fd732 2568 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2569 return rc;
1c3fae4d 2570
936fd732 2571 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2572 return 0;
2573
936fd732 2574 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2575 return rc;
2576
1c3fae4d
TH
2577 return 1;
2578}
2579
452503f9
AC
2580/*
2581 * This mode timing computation functionality is ported over from
2582 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2583 */
2584/*
b352e57d 2585 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2586 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2587 * for UDMA6, which is currently supported only by Maxtor drives.
2588 *
2589 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2590 */
2591
2592static const struct ata_timing ata_timing[] = {
2593
2594 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2595 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2596 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2597 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2598
b352e57d
AC
2599 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2600 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2601 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2602 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2603 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2604
2605/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2606
452503f9
AC
2607 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2608 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2609 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2610
452503f9
AC
2611 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2612 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2613 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2614
b352e57d
AC
2615 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2616 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2617 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2618 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2619
2620 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2621 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2622 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2623
2624/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2625
2626 { 0xFF }
2627};
2628
2629#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2630#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2631
2632static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2633{
2634 q->setup = EZ(t->setup * 1000, T);
2635 q->act8b = EZ(t->act8b * 1000, T);
2636 q->rec8b = EZ(t->rec8b * 1000, T);
2637 q->cyc8b = EZ(t->cyc8b * 1000, T);
2638 q->active = EZ(t->active * 1000, T);
2639 q->recover = EZ(t->recover * 1000, T);
2640 q->cycle = EZ(t->cycle * 1000, T);
2641 q->udma = EZ(t->udma * 1000, UT);
2642}
2643
2644void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2645 struct ata_timing *m, unsigned int what)
2646{
2647 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2648 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2649 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2650 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2651 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2652 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2653 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2654 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2655}
2656
2657static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2658{
2659 const struct ata_timing *t;
2660
2661 for (t = ata_timing; t->mode != speed; t++)
91190758 2662 if (t->mode == 0xFF)
452503f9 2663 return NULL;
2e9edbf8 2664 return t;
452503f9
AC
2665}
2666
2667int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2668 struct ata_timing *t, int T, int UT)
2669{
2670 const struct ata_timing *s;
2671 struct ata_timing p;
2672
2673 /*
2e9edbf8 2674 * Find the mode.
75b1f2f8 2675 */
452503f9
AC
2676
2677 if (!(s = ata_timing_find_mode(speed)))
2678 return -EINVAL;
2679
75b1f2f8
AL
2680 memcpy(t, s, sizeof(*s));
2681
452503f9
AC
2682 /*
2683 * If the drive is an EIDE drive, it can tell us it needs extended
2684 * PIO/MW_DMA cycle timing.
2685 */
2686
2687 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2688 memset(&p, 0, sizeof(p));
2689 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2690 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2691 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2692 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2693 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2694 }
2695 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2696 }
2697
2698 /*
2699 * Convert the timing to bus clock counts.
2700 */
2701
75b1f2f8 2702 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2703
2704 /*
c893a3ae
RD
2705 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2706 * S.M.A.R.T * and some other commands. We have to ensure that the
2707 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2708 */
2709
fd3367af 2710 if (speed > XFER_PIO_6) {
452503f9
AC
2711 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2712 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2713 }
2714
2715 /*
c893a3ae 2716 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2717 */
2718
2719 if (t->act8b + t->rec8b < t->cyc8b) {
2720 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2721 t->rec8b = t->cyc8b - t->act8b;
2722 }
2723
2724 if (t->active + t->recover < t->cycle) {
2725 t->active += (t->cycle - (t->active + t->recover)) / 2;
2726 t->recover = t->cycle - t->active;
2727 }
a617c09f 2728
4f701d1e
AC
2729 /* In a few cases quantisation may produce enough errors to
2730 leave t->cycle too low for the sum of active and recovery
2731 if so we must correct this */
2732 if (t->active + t->recover > t->cycle)
2733 t->cycle = t->active + t->recover;
452503f9
AC
2734
2735 return 0;
2736}
2737
cf176e1a
TH
2738/**
2739 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2740 * @dev: Device to adjust xfer masks
458337db 2741 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2742 *
2743 * Adjust xfer masks of @dev downward. Note that this function
2744 * does not apply the change. Invoking ata_set_mode() afterwards
2745 * will apply the limit.
2746 *
2747 * LOCKING:
2748 * Inherited from caller.
2749 *
2750 * RETURNS:
2751 * 0 on success, negative errno on failure
2752 */
458337db 2753int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2754{
458337db
TH
2755 char buf[32];
2756 unsigned int orig_mask, xfer_mask;
2757 unsigned int pio_mask, mwdma_mask, udma_mask;
2758 int quiet, highbit;
cf176e1a 2759
458337db
TH
2760 quiet = !!(sel & ATA_DNXFER_QUIET);
2761 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2762
458337db
TH
2763 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2764 dev->mwdma_mask,
2765 dev->udma_mask);
2766 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2767
458337db
TH
2768 switch (sel) {
2769 case ATA_DNXFER_PIO:
2770 highbit = fls(pio_mask) - 1;
2771 pio_mask &= ~(1 << highbit);
2772 break;
2773
2774 case ATA_DNXFER_DMA:
2775 if (udma_mask) {
2776 highbit = fls(udma_mask) - 1;
2777 udma_mask &= ~(1 << highbit);
2778 if (!udma_mask)
2779 return -ENOENT;
2780 } else if (mwdma_mask) {
2781 highbit = fls(mwdma_mask) - 1;
2782 mwdma_mask &= ~(1 << highbit);
2783 if (!mwdma_mask)
2784 return -ENOENT;
2785 }
2786 break;
2787
2788 case ATA_DNXFER_40C:
2789 udma_mask &= ATA_UDMA_MASK_40C;
2790 break;
2791
2792 case ATA_DNXFER_FORCE_PIO0:
2793 pio_mask &= 1;
2794 case ATA_DNXFER_FORCE_PIO:
2795 mwdma_mask = 0;
2796 udma_mask = 0;
2797 break;
2798
458337db
TH
2799 default:
2800 BUG();
2801 }
2802
2803 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2804
2805 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2806 return -ENOENT;
2807
2808 if (!quiet) {
2809 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2810 snprintf(buf, sizeof(buf), "%s:%s",
2811 ata_mode_string(xfer_mask),
2812 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2813 else
2814 snprintf(buf, sizeof(buf), "%s",
2815 ata_mode_string(xfer_mask));
2816
2817 ata_dev_printk(dev, KERN_WARNING,
2818 "limiting speed to %s\n", buf);
2819 }
cf176e1a
TH
2820
2821 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2822 &dev->udma_mask);
2823
cf176e1a 2824 return 0;
cf176e1a
TH
2825}
2826
3373efd8 2827static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2828{
9af5c9c9 2829 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2830 unsigned int err_mask;
2831 int rc;
1da177e4 2832
e8384607 2833 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2834 if (dev->xfer_shift == ATA_SHIFT_PIO)
2835 dev->flags |= ATA_DFLAG_PIO;
2836
3373efd8 2837 err_mask = ata_dev_set_xfermode(dev);
11750a40
A
2838 /* Old CFA may refuse this command, which is just fine */
2839 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2840 err_mask &= ~AC_ERR_DEV;
0bc2a79a
AC
2841 /* Some very old devices and some bad newer ones fail any kind of
2842 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2843 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2844 dev->pio_mode <= XFER_PIO_2)
2845 err_mask &= ~AC_ERR_DEV;
83206a29 2846 if (err_mask) {
f15a1daf
TH
2847 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2848 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2849 return -EIO;
2850 }
1da177e4 2851
baa1e78a 2852 ehc->i.flags |= ATA_EHI_POST_SETMODE;
422c9daa 2853 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
baa1e78a 2854 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2855 if (rc)
83206a29 2856 return rc;
48a8a14f 2857
23e71c3d
TH
2858 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2859 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2860
f15a1daf
TH
2861 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2862 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2863 return 0;
1da177e4
LT
2864}
2865
1da177e4 2866/**
04351821 2867 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2868 * @link: link on which timings will be programmed
e82cbdb9 2869 * @r_failed_dev: out paramter for failed device
1da177e4 2870 *
04351821
A
2871 * Standard implementation of the function used to tune and set
2872 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2873 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2874 * returned in @r_failed_dev.
780a87f7 2875 *
1da177e4 2876 * LOCKING:
0cba632b 2877 * PCI/etc. bus probe sem.
e82cbdb9
TH
2878 *
2879 * RETURNS:
2880 * 0 on success, negative errno otherwise
1da177e4 2881 */
04351821 2882
0260731f 2883int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2884{
0260731f 2885 struct ata_port *ap = link->ap;
e8e0619f 2886 struct ata_device *dev;
f58229f8 2887 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2888
a6d5a51c 2889 /* step 1: calculate xfer_mask */
f58229f8 2890 ata_link_for_each_dev(dev, link) {
acf356b1 2891 unsigned int pio_mask, dma_mask;
a6d5a51c 2892
e1211e3f 2893 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2894 continue;
2895
3373efd8 2896 ata_dev_xfermask(dev);
1da177e4 2897
acf356b1
TH
2898 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2899 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2900 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2901 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2902
4f65977d 2903 found = 1;
5444a6f4
AC
2904 if (dev->dma_mode)
2905 used_dma = 1;
a6d5a51c 2906 }
4f65977d 2907 if (!found)
e82cbdb9 2908 goto out;
a6d5a51c
TH
2909
2910 /* step 2: always set host PIO timings */
f58229f8 2911 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2912 if (!ata_dev_enabled(dev))
2913 continue;
2914
2915 if (!dev->pio_mode) {
f15a1daf 2916 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2917 rc = -EINVAL;
e82cbdb9 2918 goto out;
e8e0619f
TH
2919 }
2920
2921 dev->xfer_mode = dev->pio_mode;
2922 dev->xfer_shift = ATA_SHIFT_PIO;
2923 if (ap->ops->set_piomode)
2924 ap->ops->set_piomode(ap, dev);
2925 }
1da177e4 2926
a6d5a51c 2927 /* step 3: set host DMA timings */
f58229f8 2928 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2929 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2930 continue;
2931
2932 dev->xfer_mode = dev->dma_mode;
2933 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2934 if (ap->ops->set_dmamode)
2935 ap->ops->set_dmamode(ap, dev);
2936 }
1da177e4
LT
2937
2938 /* step 4: update devices' xfer mode */
f58229f8 2939 ata_link_for_each_dev(dev, link) {
18d90deb 2940 /* don't update suspended devices' xfer mode */
9666f400 2941 if (!ata_dev_enabled(dev))
83206a29
TH
2942 continue;
2943
3373efd8 2944 rc = ata_dev_set_mode(dev);
5bbc53f4 2945 if (rc)
e82cbdb9 2946 goto out;
83206a29 2947 }
1da177e4 2948
e8e0619f
TH
2949 /* Record simplex status. If we selected DMA then the other
2950 * host channels are not permitted to do so.
5444a6f4 2951 */
cca3974e 2952 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2953 ap->host->simplex_claimed = ap;
5444a6f4 2954
e82cbdb9
TH
2955 out:
2956 if (rc)
2957 *r_failed_dev = dev;
2958 return rc;
1da177e4
LT
2959}
2960
04351821
A
2961/**
2962 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2963 * @link: link on which timings will be programmed
04351821
A
2964 * @r_failed_dev: out paramter for failed device
2965 *
2966 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2967 * ata_set_mode() fails, pointer to the failing device is
2968 * returned in @r_failed_dev.
2969 *
2970 * LOCKING:
2971 * PCI/etc. bus probe sem.
2972 *
2973 * RETURNS:
2974 * 0 on success, negative errno otherwise
2975 */
0260731f 2976int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2977{
0260731f
TH
2978 struct ata_port *ap = link->ap;
2979
04351821
A
2980 /* has private set_mode? */
2981 if (ap->ops->set_mode)
0260731f
TH
2982 return ap->ops->set_mode(link, r_failed_dev);
2983 return ata_do_set_mode(link, r_failed_dev);
04351821
A
2984}
2985
1fdffbce
JG
2986/**
2987 * ata_tf_to_host - issue ATA taskfile to host controller
2988 * @ap: port to which command is being issued
2989 * @tf: ATA taskfile register set
2990 *
2991 * Issues ATA taskfile register set to ATA host controller,
2992 * with proper synchronization with interrupt handler and
2993 * other threads.
2994 *
2995 * LOCKING:
cca3974e 2996 * spin_lock_irqsave(host lock)
1fdffbce
JG
2997 */
2998
2999static inline void ata_tf_to_host(struct ata_port *ap,
3000 const struct ata_taskfile *tf)
3001{
3002 ap->ops->tf_load(ap, tf);
3003 ap->ops->exec_command(ap, tf);
3004}
3005
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies); warn if BSY is still
 *		set after this long
 *	@tmout: overall timeout (jiffies); give up after this long
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the port reads 0xff (no device),
 *	-EBUSY if BSY never cleared within @tmout.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* quick initial poll before falling back to 50ms sleeps */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* 0xff means a floating bus (no device) -- stop polling early */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* keep polling up to the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3060
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the link is offline and the status
 *	reads 0xff, -EBUSY if @deadline passes while BSY is still set.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		/* status is sampled before the deadline check so that at
		 * least one check happens even if we start past @deadline
		 */
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* whine once if 5s have passed and 3s+ remain */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3102
/* ata_bus_post_reset - wait for devices to become ready after bus reset
 * @ap: port which was just reset
 * @devmask: bit 0/1 set if device 0/1 was detected by ata_devchk
 * @deadline: deadline jiffies for the operation
 *
 * Returns 0 on success; -ENODEV if a detected device went away
 * (recorded but non-fatal for the other device); other -errno on
 * hard failure.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;	/* remember -ENODEV, keep going */
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3162
/* ata_bus_softreset - issue an SFF software reset (SRST) on @ap
 * @ap: port to reset
 * @devmask: mask of devices detected before the reset
 * @deadline: deadline jiffies for the post-reset ready wait
 *
 * Returns 0 on success, -ENODEV if the bus floats at 0xFF,
 * or the error from ata_bus_post_reset().
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3198
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; 40s deadline, -ENODEV alone is tolerated */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	/* err == 0x81 means device 1 failed diagnostics; skip it */
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3286
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline = min(now + timeout, @deadline) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once past deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3355
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 0 (no action), IPM = 3 (no partial/slumber) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3391
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: escalate to hardreset when the link
	 * needs a hardreset to come back up
	 */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3448
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, report and succeed */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81: device 1 failed diagnostics, don't classify it */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3508
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the phy */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3568
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3626
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear: write back what we read) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3671
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	/* [0] holds the old (configured) string, [1] the freshly read one */
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
3720
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch sector buffer for the new ID page */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	/* commit the freshly read ID only after the identity check */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3753
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3818
/* One blacklist entry: model number pattern, optional firmware revision
 * pattern (NULL matches any revision), and the horkage flags to apply.
 * Patterns support a single trailing '*' wildcard -- see
 * strn_pattern_cmp().
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

/* Table of known-broken devices, matched by ata_dev_blacklisted().
 * Terminated by an all-zero sentinel entry.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA ZIP 250 ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA ZIP 250 ATAPI Floppy",
				NULL,		ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.AD",		ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* End Marker */
	{ }
};
2e9edbf8 3901
/**
 *	strn_pattern_cmp - compare a string against a pattern with an
 *	optional single trailing wildcard
 *	@patt: pattern, possibly ending in @wildchar
 *	@name: string to test
 *	@wildchar: wildcard character (e.g. '*')
 *
 *	If @patt ends in @wildchar, only the prefix before the wildcard
 *	is compared.  Otherwise the comparison is bounded by the length
 *	of @name (so a shorter @name can still match a longer @patt).
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strncmp() semantics).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	size_t cmp_len;

	if (wild != NULL && wild[1] == '\0')
		cmp_len = wild - patt;		/* prefix before trailing wildcard */
	else
		cmp_len = strlen(name);	/* NOTE: bounded by name, not patt */

	return strncmp(patt, name, cmp_len);
}
3918
/* ata_dev_blacklisted - look up @dev in the device blacklist
 * @dev: device whose IDENTIFY data is consulted
 *
 * Matches model number and, when the entry specifies one, firmware
 * revision against ata_device_blacklist using strn_pattern_cmp()
 * with '*' as the wildcard.
 *
 * Returns the entry's horkage flags on a match, 0 otherwise.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	/* table is terminated by an entry with NULL model_num */
	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			/* NULL model_rev matches any firmware revision */
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
3939
/* ata_dma_blacklisted - should DMA be disabled for @dev?
 *
 * Returns 1 if DMA must not be used with this device, 0 otherwise.
 */
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	/* otherwise, defer to the per-device NODMA horkage flag */
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
3951
a6d5a51c
TH
3952/**
3953 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3954 * @dev: Device to compute xfermask for
3955 *
acf356b1
TH
3956 * Compute supported xfermask of @dev and store it in
3957 * dev->*_mask. This function is responsible for applying all
3958 * known limits including host controller limits, device
3959 * blacklist, etc...
a6d5a51c
TH
3960 *
3961 * LOCKING:
3962 * None.
a6d5a51c 3963 */
3373efd8 3964static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3965{
9af5c9c9
TH
3966 struct ata_link *link = dev->link;
3967 struct ata_port *ap = link->ap;
cca3974e 3968 struct ata_host *host = ap->host;
a6d5a51c 3969 unsigned long xfer_mask;
1da177e4 3970
37deecb5 3971 /* controller modes available */
565083e1
TH
3972 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3973 ap->mwdma_mask, ap->udma_mask);
3974
8343f889 3975 /* drive modes available */
37deecb5
TH
3976 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3977 dev->mwdma_mask, dev->udma_mask);
3978 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3979
b352e57d
AC
3980 /*
3981 * CFA Advanced TrueIDE timings are not allowed on a shared
3982 * cable
3983 */
3984 if (ata_dev_pair(dev)) {
3985 /* No PIO5 or PIO6 */
3986 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3987 /* No MWDMA3 or MWDMA 4 */
3988 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3989 }
3990
37deecb5
TH
3991 if (ata_dma_blacklisted(dev)) {
3992 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3993 ata_dev_printk(dev, KERN_WARNING,
3994 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3995 }
a6d5a51c 3996
14d66ab7
PV
3997 if ((host->flags & ATA_HOST_SIMPLEX) &&
3998 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3999 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4000 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4001 "other device, disabling DMA\n");
5444a6f4 4002 }
565083e1 4003
e424675f
JG
4004 if (ap->flags & ATA_FLAG_NO_IORDY)
4005 xfer_mask &= ata_pio_mask_no_iordy(dev);
4006
5444a6f4 4007 if (ap->ops->mode_filter)
a76b62ca 4008 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4009
8343f889
RH
4010 /* Apply cable rule here. Don't apply it early because when
4011 * we handle hot plug the cable type can itself change.
4012 * Check this last so that we know if the transfer rate was
4013 * solely limited by the cable.
4014 * Unknown or 80 wire cables reported host side are checked
4015 * drive side as well. Cases where we know a 40wire cable
4016 * is used safely for 80 are not checked here.
4017 */
4018 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4019 /* UDMA/44 or higher would be available */
4020 if((ap->cbl == ATA_CBL_PATA40) ||
4021 (ata_drive_40wire(dev->id) &&
4022 (ap->cbl == ATA_CBL_PATA_UNK ||
4023 ap->cbl == ATA_CBL_PATA80))) {
4024 ata_dev_printk(dev, KERN_WARNING,
4025 "limited to UDMA/33 due to 40-wire cable\n");
4026 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4027 }
4028
565083e1
TH
4029 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4030 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4031}
4032
1da177e4
LT
4033/**
4034 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4035 * @dev: Device to which command will be sent
4036 *
780a87f7
JG
4037 * Issue SET FEATURES - XFER MODE command to device @dev
4038 * on port @ap.
4039 *
1da177e4 4040 * LOCKING:
0cba632b 4041 * PCI/etc. bus probe sem.
83206a29
TH
4042 *
4043 * RETURNS:
4044 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4045 */
4046
3373efd8 4047static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4048{
a0123703 4049 struct ata_taskfile tf;
83206a29 4050 unsigned int err_mask;
1da177e4
LT
4051
4052 /* set up set-features taskfile */
4053 DPRINTK("set features - xfer mode\n");
4054
464cf177
TH
4055 /* Some controllers and ATAPI devices show flaky interrupt
4056 * behavior after setting xfer mode. Use polling instead.
4057 */
3373efd8 4058 ata_tf_init(dev, &tf);
a0123703
TH
4059 tf.command = ATA_CMD_SET_FEATURES;
4060 tf.feature = SETFEATURES_XFER;
464cf177 4061 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4062 tf.protocol = ATA_PROT_NODATA;
4063 tf.nsect = dev->xfer_mode;
1da177e4 4064
3373efd8 4065 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4066
4067 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4068 return err_mask;
4069}
4070
4071/**
4072 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4073 * @dev: Device to which command will be sent
4074 * @enable: Whether to enable or disable the feature
4075 *
4076 * Issue SET FEATURES - SATA FEATURES command to device @dev
4077 * on port @ap with sector count set to indicate Asynchronous
4078 * Notification feature
4079 *
4080 * LOCKING:
4081 * PCI/etc. bus probe sem.
4082 *
4083 * RETURNS:
4084 * 0 on success, AC_ERR_* mask otherwise.
4085 */
4086static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4087{
4088 struct ata_taskfile tf;
4089 unsigned int err_mask;
4090
4091 /* set up set-features taskfile */
4092 DPRINTK("set features - SATA features\n");
4093
4094 ata_tf_init(dev, &tf);
4095 tf.command = ATA_CMD_SET_FEATURES;
4096 tf.feature = enable;
4097 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4098 tf.protocol = ATA_PROT_NODATA;
4099 tf.nsect = SATA_AN;
4100
4101 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4102
83206a29
TH
4103 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4104 return err_mask;
1da177e4
LT
4105}
4106
8bf62ece
AL
4107/**
4108 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4109 * @dev: Device to which command will be sent
e2a7f77a
RD
4110 * @heads: Number of heads (taskfile parameter)
4111 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4112 *
4113 * LOCKING:
6aff8f1f
TH
4114 * Kernel thread context (may sleep)
4115 *
4116 * RETURNS:
4117 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4118 */
3373efd8
TH
4119static unsigned int ata_dev_init_params(struct ata_device *dev,
4120 u16 heads, u16 sectors)
8bf62ece 4121{
a0123703 4122 struct ata_taskfile tf;
6aff8f1f 4123 unsigned int err_mask;
8bf62ece
AL
4124
4125 /* Number of sectors per track 1-255. Number of heads 1-16 */
4126 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4127 return AC_ERR_INVALID;
8bf62ece
AL
4128
4129 /* set up init dev params taskfile */
4130 DPRINTK("init dev params \n");
4131
3373efd8 4132 ata_tf_init(dev, &tf);
a0123703
TH
4133 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4134 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4135 tf.protocol = ATA_PROT_NODATA;
4136 tf.nsect = sectors;
4137 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4138
3373efd8 4139 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4140 /* A clean abort indicates an original or just out of spec drive
4141 and we should continue as we issue the setup based on the
4142 drive reported working geometry */
4143 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4144 err_mask = 0;
8bf62ece 4145
6aff8f1f
TH
4146 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4147 return err_mask;
8bf62ece
AL
4148}
4149
1da177e4 4150/**
0cba632b
JG
4151 * ata_sg_clean - Unmap DMA memory associated with command
4152 * @qc: Command containing DMA memory to be released
4153 *
4154 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4155 *
4156 * LOCKING:
cca3974e 4157 * spin_lock_irqsave(host lock)
1da177e4 4158 */
70e6ad0c 4159void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4160{
4161 struct ata_port *ap = qc->ap;
cedc9a47 4162 struct scatterlist *sg = qc->__sg;
1da177e4 4163 int dir = qc->dma_dir;
cedc9a47 4164 void *pad_buf = NULL;
1da177e4 4165
a4631474
TH
4166 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4167 WARN_ON(sg == NULL);
1da177e4
LT
4168
4169 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4170 WARN_ON(qc->n_elem > 1);
1da177e4 4171
2c13b7ce 4172 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4173
cedc9a47
JG
4174 /* if we padded the buffer out to 32-bit bound, and data
4175 * xfer direction is from-device, we must copy from the
4176 * pad buffer back into the supplied buffer
4177 */
4178 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4179 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4180
4181 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4182 if (qc->n_elem)
2f1f610b 4183 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4184 /* restore last sg */
4185 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4186 if (pad_buf) {
4187 struct scatterlist *psg = &qc->pad_sgent;
4188 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4189 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4190 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4191 }
4192 } else {
2e242fa9 4193 if (qc->n_elem)
2f1f610b 4194 dma_unmap_single(ap->dev,
e1410f2d
JG
4195 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4196 dir);
cedc9a47
JG
4197 /* restore sg */
4198 sg->length += qc->pad_len;
4199 if (pad_buf)
4200 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4201 pad_buf, qc->pad_len);
4202 }
1da177e4
LT
4203
4204 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4205 qc->__sg = NULL;
1da177e4
LT
4206}
4207
4208/**
4209 * ata_fill_sg - Fill PCI IDE PRD table
4210 * @qc: Metadata associated with taskfile to be transferred
4211 *
780a87f7
JG
4212 * Fill PCI IDE PRD (scatter-gather) table with segments
4213 * associated with the current disk command.
4214 *
1da177e4 4215 * LOCKING:
cca3974e 4216 * spin_lock_irqsave(host lock)
1da177e4
LT
4217 *
4218 */
4219static void ata_fill_sg(struct ata_queued_cmd *qc)
4220{
1da177e4 4221 struct ata_port *ap = qc->ap;
cedc9a47
JG
4222 struct scatterlist *sg;
4223 unsigned int idx;
1da177e4 4224
a4631474 4225 WARN_ON(qc->__sg == NULL);
f131883e 4226 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4227
4228 idx = 0;
cedc9a47 4229 ata_for_each_sg(sg, qc) {
1da177e4
LT
4230 u32 addr, offset;
4231 u32 sg_len, len;
4232
4233 /* determine if physical DMA addr spans 64K boundary.
4234 * Note h/w doesn't support 64-bit, so we unconditionally
4235 * truncate dma_addr_t to u32.
4236 */
4237 addr = (u32) sg_dma_address(sg);
4238 sg_len = sg_dma_len(sg);
4239
4240 while (sg_len) {
4241 offset = addr & 0xffff;
4242 len = sg_len;
4243 if ((offset + sg_len) > 0x10000)
4244 len = 0x10000 - offset;
4245
4246 ap->prd[idx].addr = cpu_to_le32(addr);
4247 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4248 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4249
4250 idx++;
4251 sg_len -= len;
4252 addr += len;
4253 }
4254 }
4255
4256 if (idx)
4257 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4258}
b9a4197e 4259
d26fc955
AC
4260/**
4261 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4262 * @qc: Metadata associated with taskfile to be transferred
4263 *
4264 * Fill PCI IDE PRD (scatter-gather) table with segments
4265 * associated with the current disk command. Perform the fill
4266 * so that we avoid writing any length 64K records for
4267 * controllers that don't follow the spec.
4268 *
4269 * LOCKING:
4270 * spin_lock_irqsave(host lock)
4271 *
4272 */
4273static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4274{
4275 struct ata_port *ap = qc->ap;
4276 struct scatterlist *sg;
4277 unsigned int idx;
4278
4279 WARN_ON(qc->__sg == NULL);
4280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4281
4282 idx = 0;
4283 ata_for_each_sg(sg, qc) {
4284 u32 addr, offset;
4285 u32 sg_len, len, blen;
4286
4287 /* determine if physical DMA addr spans 64K boundary.
4288 * Note h/w doesn't support 64-bit, so we unconditionally
4289 * truncate dma_addr_t to u32.
4290 */
4291 addr = (u32) sg_dma_address(sg);
4292 sg_len = sg_dma_len(sg);
4293
4294 while (sg_len) {
4295 offset = addr & 0xffff;
4296 len = sg_len;
4297 if ((offset + sg_len) > 0x10000)
4298 len = 0x10000 - offset;
4299
4300 blen = len & 0xffff;
4301 ap->prd[idx].addr = cpu_to_le32(addr);
4302 if (blen == 0) {
4303 /* Some PATA chipsets like the CS5530 can't
4304 cope with 0x0000 meaning 64K as the spec says */
4305 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4306 blen = 0x8000;
4307 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4308 }
4309 ap->prd[idx].flags_len = cpu_to_le32(blen);
4310 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4311
4312 idx++;
4313 sg_len -= len;
4314 addr += len;
4315 }
4316 }
4317
4318 if (idx)
4319 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4320}
4321
1da177e4
LT
4322/**
4323 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4324 * @qc: Metadata associated with taskfile to check
4325 *
780a87f7
JG
4326 * Allow low-level driver to filter ATA PACKET commands, returning
4327 * a status indicating whether or not it is OK to use DMA for the
4328 * supplied PACKET command.
4329 *
1da177e4 4330 * LOCKING:
cca3974e 4331 * spin_lock_irqsave(host lock)
0cba632b 4332 *
1da177e4
LT
4333 * RETURNS: 0 when ATAPI DMA can be used
4334 * nonzero otherwise
4335 */
4336int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4337{
4338 struct ata_port *ap = qc->ap;
b9a4197e
TH
4339
4340 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4341 * few ATAPI devices choke on such DMA requests.
4342 */
4343 if (unlikely(qc->nbytes & 15))
4344 return 1;
6f23a31d 4345
1da177e4 4346 if (ap->ops->check_atapi_dma)
b9a4197e 4347 return ap->ops->check_atapi_dma(qc);
1da177e4 4348
b9a4197e 4349 return 0;
1da177e4 4350}
b9a4197e 4351
31cc23b3
TH
4352/**
4353 * ata_std_qc_defer - Check whether a qc needs to be deferred
4354 * @qc: ATA command in question
4355 *
4356 * Non-NCQ commands cannot run with any other command, NCQ or
4357 * not. As upper layer only knows the queue depth, we are
4358 * responsible for maintaining exclusion. This function checks
4359 * whether a new command @qc can be issued.
4360 *
4361 * LOCKING:
4362 * spin_lock_irqsave(host lock)
4363 *
4364 * RETURNS:
4365 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4366 */
4367int ata_std_qc_defer(struct ata_queued_cmd *qc)
4368{
4369 struct ata_link *link = qc->dev->link;
4370
4371 if (qc->tf.protocol == ATA_PROT_NCQ) {
4372 if (!ata_tag_valid(link->active_tag))
4373 return 0;
4374 } else {
4375 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4376 return 0;
4377 }
4378
4379 return ATA_DEFER_LINK;
4380}
4381
1da177e4
LT
4382/**
4383 * ata_qc_prep - Prepare taskfile for submission
4384 * @qc: Metadata associated with taskfile to be prepared
4385 *
780a87f7
JG
4386 * Prepare ATA taskfile for submission.
4387 *
1da177e4 4388 * LOCKING:
cca3974e 4389 * spin_lock_irqsave(host lock)
1da177e4
LT
4390 */
4391void ata_qc_prep(struct ata_queued_cmd *qc)
4392{
4393 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4394 return;
4395
4396 ata_fill_sg(qc);
4397}
4398
d26fc955
AC
4399/**
4400 * ata_dumb_qc_prep - Prepare taskfile for submission
4401 * @qc: Metadata associated with taskfile to be prepared
4402 *
4403 * Prepare ATA taskfile for submission.
4404 *
4405 * LOCKING:
4406 * spin_lock_irqsave(host lock)
4407 */
4408void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4409{
4410 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4411 return;
4412
4413 ata_fill_sg_dumb(qc);
4414}
4415
e46834cd
BK
4416void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4417
0cba632b
JG
4418/**
4419 * ata_sg_init_one - Associate command with memory buffer
4420 * @qc: Command to be associated
4421 * @buf: Memory buffer
4422 * @buflen: Length of memory buffer, in bytes.
4423 *
4424 * Initialize the data-related elements of queued_cmd @qc
4425 * to point to a single memory buffer, @buf of byte length @buflen.
4426 *
4427 * LOCKING:
cca3974e 4428 * spin_lock_irqsave(host lock)
0cba632b
JG
4429 */
4430
1da177e4
LT
4431void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4432{
1da177e4
LT
4433 qc->flags |= ATA_QCFLAG_SINGLE;
4434
cedc9a47 4435 qc->__sg = &qc->sgent;
1da177e4 4436 qc->n_elem = 1;
cedc9a47 4437 qc->orig_n_elem = 1;
1da177e4 4438 qc->buf_virt = buf;
233277ca 4439 qc->nbytes = buflen;
1da177e4 4440
61c0596c 4441 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4442}
4443
0cba632b
JG
4444/**
4445 * ata_sg_init - Associate command with scatter-gather table.
4446 * @qc: Command to be associated
4447 * @sg: Scatter-gather table.
4448 * @n_elem: Number of elements in s/g table.
4449 *
4450 * Initialize the data-related elements of queued_cmd @qc
4451 * to point to a scatter-gather table @sg, containing @n_elem
4452 * elements.
4453 *
4454 * LOCKING:
cca3974e 4455 * spin_lock_irqsave(host lock)
0cba632b
JG
4456 */
4457
1da177e4
LT
4458void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4459 unsigned int n_elem)
4460{
4461 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4462 qc->__sg = sg;
1da177e4 4463 qc->n_elem = n_elem;
cedc9a47 4464 qc->orig_n_elem = n_elem;
1da177e4
LT
4465}
4466
4467/**
0cba632b
JG
4468 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4469 * @qc: Command with memory buffer to be mapped.
4470 *
4471 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4472 *
4473 * LOCKING:
cca3974e 4474 * spin_lock_irqsave(host lock)
1da177e4
LT
4475 *
4476 * RETURNS:
0cba632b 4477 * Zero on success, negative on error.
1da177e4
LT
4478 */
4479
4480static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4481{
4482 struct ata_port *ap = qc->ap;
4483 int dir = qc->dma_dir;
cedc9a47 4484 struct scatterlist *sg = qc->__sg;
1da177e4 4485 dma_addr_t dma_address;
2e242fa9 4486 int trim_sg = 0;
1da177e4 4487
cedc9a47
JG
4488 /* we must lengthen transfers to end on a 32-bit boundary */
4489 qc->pad_len = sg->length & 3;
4490 if (qc->pad_len) {
4491 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4492 struct scatterlist *psg = &qc->pad_sgent;
4493
a4631474 4494 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4495
4496 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4497
4498 if (qc->tf.flags & ATA_TFLAG_WRITE)
4499 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4500 qc->pad_len);
4501
4502 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4503 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4504 /* trim sg */
4505 sg->length -= qc->pad_len;
2e242fa9
TH
4506 if (sg->length == 0)
4507 trim_sg = 1;
cedc9a47
JG
4508
4509 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4510 sg->length, qc->pad_len);
4511 }
4512
2e242fa9
TH
4513 if (trim_sg) {
4514 qc->n_elem--;
e1410f2d
JG
4515 goto skip_map;
4516 }
4517
2f1f610b 4518 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4519 sg->length, dir);
537a95d9
TH
4520 if (dma_mapping_error(dma_address)) {
4521 /* restore sg */
4522 sg->length += qc->pad_len;
1da177e4 4523 return -1;
537a95d9 4524 }
1da177e4
LT
4525
4526 sg_dma_address(sg) = dma_address;
32529e01 4527 sg_dma_len(sg) = sg->length;
1da177e4 4528
2e242fa9 4529skip_map:
1da177e4
LT
4530 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4531 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4532
4533 return 0;
4534}
4535
4536/**
0cba632b
JG
4537 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4538 * @qc: Command with scatter-gather table to be mapped.
4539 *
4540 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4541 *
4542 * LOCKING:
cca3974e 4543 * spin_lock_irqsave(host lock)
1da177e4
LT
4544 *
4545 * RETURNS:
0cba632b 4546 * Zero on success, negative on error.
1da177e4
LT
4547 *
4548 */
4549
4550static int ata_sg_setup(struct ata_queued_cmd *qc)
4551{
4552 struct ata_port *ap = qc->ap;
cedc9a47
JG
4553 struct scatterlist *sg = qc->__sg;
4554 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4555 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4556
44877b4e 4557 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4558 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4559
cedc9a47
JG
4560 /* we must lengthen transfers to end on a 32-bit boundary */
4561 qc->pad_len = lsg->length & 3;
4562 if (qc->pad_len) {
4563 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4564 struct scatterlist *psg = &qc->pad_sgent;
4565 unsigned int offset;
4566
a4631474 4567 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4568
4569 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4570
4571 /*
4572 * psg->page/offset are used to copy to-be-written
4573 * data in this function or read data in ata_sg_clean.
4574 */
4575 offset = lsg->offset + lsg->length - qc->pad_len;
4576 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4577 psg->offset = offset_in_page(offset);
4578
4579 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4580 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4581 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4582 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4583 }
4584
4585 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4586 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4587 /* trim last sg */
4588 lsg->length -= qc->pad_len;
e1410f2d
JG
4589 if (lsg->length == 0)
4590 trim_sg = 1;
cedc9a47
JG
4591
4592 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4593 qc->n_elem - 1, lsg->length, qc->pad_len);
4594 }
4595
e1410f2d
JG
4596 pre_n_elem = qc->n_elem;
4597 if (trim_sg && pre_n_elem)
4598 pre_n_elem--;
4599
4600 if (!pre_n_elem) {
4601 n_elem = 0;
4602 goto skip_map;
4603 }
4604
1da177e4 4605 dir = qc->dma_dir;
2f1f610b 4606 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4607 if (n_elem < 1) {
4608 /* restore last sg */
4609 lsg->length += qc->pad_len;
1da177e4 4610 return -1;
537a95d9 4611 }
1da177e4
LT
4612
4613 DPRINTK("%d sg elements mapped\n", n_elem);
4614
e1410f2d 4615skip_map:
1da177e4
LT
4616 qc->n_elem = n_elem;
4617
4618 return 0;
4619}
4620
0baab86b 4621/**
c893a3ae 4622 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4623 * @buf: Buffer to swap
4624 * @buf_words: Number of 16-bit words in buffer.
4625 *
4626 * Swap halves of 16-bit words if needed to convert from
4627 * little-endian byte order to native cpu byte order, or
4628 * vice-versa.
4629 *
4630 * LOCKING:
6f0ef4fa 4631 * Inherited from caller.
0baab86b 4632 */
1da177e4
LT
4633void swap_buf_le16(u16 *buf, unsigned int buf_words)
4634{
4635#ifdef __BIG_ENDIAN
4636 unsigned int i;
4637
4638 for (i = 0; i < buf_words; i++)
4639 buf[i] = le16_to_cpu(buf[i]);
4640#endif /* __BIG_ENDIAN */
4641}
4642
6ae4cfb5 4643/**
0d5ff566 4644 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4645 * @adev: device to target
6ae4cfb5
AL
4646 * @buf: data buffer
4647 * @buflen: buffer length
344babaa 4648 * @write_data: read/write
6ae4cfb5
AL
4649 *
4650 * Transfer data from/to the device data register by PIO.
4651 *
4652 * LOCKING:
4653 * Inherited from caller.
6ae4cfb5 4654 */
0d5ff566
TH
4655void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4656 unsigned int buflen, int write_data)
1da177e4 4657{
9af5c9c9 4658 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4659 unsigned int words = buflen >> 1;
1da177e4 4660
6ae4cfb5 4661 /* Transfer multiple of 2 bytes */
1da177e4 4662 if (write_data)
0d5ff566 4663 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4664 else
0d5ff566 4665 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4666
4667 /* Transfer trailing 1 byte, if any. */
4668 if (unlikely(buflen & 0x01)) {
4669 u16 align_buf[1] = { 0 };
4670 unsigned char *trailing_buf = buf + buflen - 1;
4671
4672 if (write_data) {
4673 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4674 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4675 } else {
0d5ff566 4676 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4677 memcpy(trailing_buf, align_buf, 1);
4678 }
4679 }
1da177e4
LT
4680}
4681
75e99585 4682/**
0d5ff566 4683 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4684 * @adev: device to target
4685 * @buf: data buffer
4686 * @buflen: buffer length
4687 * @write_data: read/write
4688 *
88574551 4689 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4690 * transfer with interrupts disabled.
4691 *
4692 * LOCKING:
4693 * Inherited from caller.
4694 */
0d5ff566
TH
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* Same as ata_data_xfer(), but with local interrupts masked
	 * for the duration of the PIO transfer.
	 */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
4703
4704
6ae4cfb5 4705/**
5a5dbd18 4706 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4707 * @qc: Command on going
4708 *
5a5dbd18 4709 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4710 *
4711 * LOCKING:
4712 * Inherited from caller.
4713 */
4714
1da177e4
LT
4715static void ata_pio_sector(struct ata_queued_cmd *qc)
4716{
4717 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4718 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4719 struct ata_port *ap = qc->ap;
4720 struct page *page;
4721 unsigned int offset;
4722 unsigned char *buf;
4723
5a5dbd18 4724 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4725 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4726
4727 page = sg[qc->cursg].page;
726f0785 4728 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4729
4730 /* get the current page and offset */
4731 page = nth_page(page, (offset >> PAGE_SHIFT));
4732 offset %= PAGE_SIZE;
4733
1da177e4
LT
4734 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4735
91b8b313
AL
4736 if (PageHighMem(page)) {
4737 unsigned long flags;
4738
a6b2c5d4 4739 /* FIXME: use a bounce buffer */
91b8b313
AL
4740 local_irq_save(flags);
4741 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4742
91b8b313 4743 /* do the actual data transfer */
5a5dbd18 4744 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4745
91b8b313
AL
4746 kunmap_atomic(buf, KM_IRQ0);
4747 local_irq_restore(flags);
4748 } else {
4749 buf = page_address(page);
5a5dbd18 4750 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4751 }
1da177e4 4752
5a5dbd18
ML
4753 qc->curbytes += qc->sect_size;
4754 qc->cursg_ofs += qc->sect_size;
1da177e4 4755
726f0785 4756 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4757 qc->cursg++;
4758 qc->cursg_ofs = 0;
4759 }
1da177e4 4760}
1da177e4 4761
07f6f7d0 4762/**
5a5dbd18 4763 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4764 * @qc: Command on going
4765 *
5a5dbd18 4766 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4767 * ATA device for the DRQ request.
4768 *
4769 * LOCKING:
4770 * Inherited from caller.
4771 */
1da177e4 4772
07f6f7d0
AL
4773static void ata_pio_sectors(struct ata_queued_cmd *qc)
4774{
4775 if (is_multi_taskfile(&qc->tf)) {
4776 /* READ/WRITE MULTIPLE */
4777 unsigned int nsect;
4778
587005de 4779 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4780
5a5dbd18 4781 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4782 qc->dev->multi_count);
07f6f7d0
AL
4783 while (nsect--)
4784 ata_pio_sector(qc);
4785 } else
4786 ata_pio_sector(qc);
4cc980b3
AL
4787
4788 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4789}
4790
c71c1857
AL
4791/**
4792 * atapi_send_cdb - Write CDB bytes to hardware
4793 * @ap: Port to which ATAPI device is attached.
4794 * @qc: Taskfile currently active
4795 *
4796 * When device has indicated its readiness to accept
4797 * a CDB, this function is called. Send the CDB.
4798 *
4799 * LOCKING:
4800 * caller.
4801 */
4802
4803static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4804{
4805 /* send SCSI cdb */
4806 DPRINTK("send cdb\n");
db024d53 4807 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4808
a6b2c5d4 4809 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4810 ata_altstatus(ap); /* flush */
4811
4812 switch (qc->tf.protocol) {
4813 case ATA_PROT_ATAPI:
4814 ap->hsm_task_state = HSM_ST;
4815 break;
4816 case ATA_PROT_ATAPI_NODATA:
4817 ap->hsm_task_state = HSM_ST_LAST;
4818 break;
4819 case ATA_PROT_ATAPI_DMA:
4820 ap->hsm_task_state = HSM_ST_LAST;
4821 /* initiate bmdma */
4822 ap->ops->bmdma_start(qc);
4823 break;
4824 }
1da177e4
LT
4825}
4826
6ae4cfb5
AL
4827/**
4828 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4829 * @qc: Command on going
4830 * @bytes: number of bytes
4831 *
4832 * Transfer Transfer data from/to the ATAPI device.
4833 *
4834 * LOCKING:
4835 * Inherited from caller.
4836 *
4837 */
4838
1da177e4
LT
4839static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4840{
4841 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4842 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4843 struct ata_port *ap = qc->ap;
4844 struct page *page;
4845 unsigned char *buf;
4846 unsigned int offset, count;
4847
563a6e1f 4848 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4849 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4850
4851next_sg:
563a6e1f 4852 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4853 /*
563a6e1f
AL
4854 * The end of qc->sg is reached and the device expects
4855 * more data to transfer. In order not to overrun qc->sg
4856 * and fulfill length specified in the byte count register,
4857 * - for read case, discard trailing data from the device
4858 * - for write case, padding zero data to the device
4859 */
4860 u16 pad_buf[1] = { 0 };
4861 unsigned int words = bytes >> 1;
4862 unsigned int i;
4863
4864 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4865 ata_dev_printk(qc->dev, KERN_WARNING,
4866 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4867
4868 for (i = 0; i < words; i++)
a6b2c5d4 4869 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4870
14be71f4 4871 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4872 return;
4873 }
4874
cedc9a47 4875 sg = &qc->__sg[qc->cursg];
1da177e4 4876
1da177e4
LT
4877 page = sg->page;
4878 offset = sg->offset + qc->cursg_ofs;
4879
4880 /* get the current page and offset */
4881 page = nth_page(page, (offset >> PAGE_SHIFT));
4882 offset %= PAGE_SIZE;
4883
6952df03 4884 /* don't overrun current sg */
32529e01 4885 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4886
4887 /* don't cross page boundaries */
4888 count = min(count, (unsigned int)PAGE_SIZE - offset);
4889
7282aa4b
AL
4890 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4891
91b8b313
AL
4892 if (PageHighMem(page)) {
4893 unsigned long flags;
4894
a6b2c5d4 4895 /* FIXME: use bounce buffer */
91b8b313
AL
4896 local_irq_save(flags);
4897 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4898
91b8b313 4899 /* do the actual data transfer */
a6b2c5d4 4900 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4901
91b8b313
AL
4902 kunmap_atomic(buf, KM_IRQ0);
4903 local_irq_restore(flags);
4904 } else {
4905 buf = page_address(page);
a6b2c5d4 4906 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4907 }
1da177e4
LT
4908
4909 bytes -= count;
4910 qc->curbytes += count;
4911 qc->cursg_ofs += count;
4912
32529e01 4913 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4914 qc->cursg++;
4915 qc->cursg_ofs = 0;
4916 }
4917
563a6e1f 4918 if (bytes)
1da177e4 4919 goto next_sg;
1da177e4
LT
4920}
4921
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants to transfer in this DRQ block */
	bytes = (bc_hi << 8) | bc_lo;

	/* CoD bit shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction (I/O bit) matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	/* bogus interrupt reason from the device - treat as HSM
	 * violation and let EH sort it out
	 */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4972
4973/**
c234fb00
AL
4974 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4975 * @ap: the target ata_port
4976 * @qc: qc on going
1da177e4 4977 *
c234fb00
AL
4978 * RETURNS:
4979 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4980 */
c234fb00
AL
4981
4982static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4983{
c234fb00
AL
4984 if (qc->tf.flags & ATA_TFLAG_POLLING)
4985 return 1;
1da177e4 4986
c234fb00
AL
4987 if (ap->hsm_task_state == HSM_ST_FIRST) {
4988 if (qc->tf.protocol == ATA_PROT_PIO &&
4989 (qc->tf.flags & ATA_TFLAG_WRITE))
4990 return 1;
1da177e4 4991
c234fb00
AL
4992 if (is_atapi_taskfile(&qc->tf) &&
4993 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4994 return 1;
fe79e683
AL
4995 }
4996
c234fb00
AL
4997 return 0;
4998}
1da177e4 4999
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.  Re-fetch the qc by tag; NULL means
			 * EH already owns it.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable device interrupt before
					 * handing the command back
					 */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation - freeze the port
					 * and let EH recover
					 */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: no freeze support, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5049
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5283
/* Workqueue entry point that drives polled PIO commands through the
 * host state machine (HSM) until the interrupt handler can take over
 * or the command completes.  @work is embedded in the ata_port; the
 * qc being driven is stashed in ap->port_task_data by the issuer.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy - reschedule ourselves after a
			 * short pause instead of spinning
			 */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5321
1da177e4
LT
5322/**
5323 * ata_qc_new - Request an available ATA command, for queueing
5324 * @ap: Port associated with device @dev
5325 * @dev: Device from whom we request an available command structure
5326 *
5327 * LOCKING:
0cba632b 5328 * None.
1da177e4
LT
5329 */
5330
5331static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5332{
5333 struct ata_queued_cmd *qc = NULL;
5334 unsigned int i;
5335
e3180499 5336 /* no command while frozen */
b51e9e5d 5337 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5338 return NULL;
5339
2ab7db1f
TH
5340 /* the last tag is reserved for internal command. */
5341 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5342 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5343 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5344 break;
5345 }
5346
5347 if (qc)
5348 qc->tag = i;
5349
5350 return qc;
5351}
5352
5353/**
5354 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5355 * @dev: Device from whom we request an available command structure
5356 *
5357 * LOCKING:
0cba632b 5358 * None.
1da177e4
LT
5359 */
5360
3373efd8 5361struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5362{
9af5c9c9 5363 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5364 struct ata_queued_cmd *qc;
5365
5366 qc = ata_qc_new(ap);
5367 if (qc) {
1da177e4
LT
5368 qc->scsicmd = NULL;
5369 qc->ap = ap;
5370 qc->dev = dev;
1da177e4 5371
2c13b7ce 5372 ata_qc_reinit(qc);
1da177e4
LT
5373 }
5374
5375 return qc;
5376}
5377
1da177e4
LT
5378/**
5379 * ata_qc_free - free unused ata_queued_cmd
5380 * @qc: Command to complete
5381 *
5382 * Designed to free unused ata_queued_cmd object
5383 * in case something prevents using it.
5384 *
5385 * LOCKING:
cca3974e 5386 * spin_lock_irqsave(host lock)
1da177e4
LT
5387 */
5388void ata_qc_free(struct ata_queued_cmd *qc)
5389{
4ba946e9
TH
5390 struct ata_port *ap = qc->ap;
5391 unsigned int tag;
5392
a4631474 5393 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5394
4ba946e9
TH
5395 qc->flags = 0;
5396 tag = qc->tag;
5397 if (likely(ata_tag_valid(tag))) {
4ba946e9 5398 qc->tag = ATA_TAG_POISON;
6cec4a39 5399 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5400 }
1da177e4
LT
5401}
5402
/* Core completion path shared by ata_qc_complete(): tear down DMA
 * mappings, retire the command from the link/port accounting and
 * invoke the completion callback.  Caller holds the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* link goes idle only when its last NCQ tag retires */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5439
39599a53
TH
5440static void fill_result_tf(struct ata_queued_cmd *qc)
5441{
5442 struct ata_port *ap = qc->ap;
5443
39599a53 5444 qc->result_tf.flags = qc->tf.flags;
4742d54f 5445 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5446}
5447
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH: completing while frozen is a bug */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH: a qc already handed to EH must not be
		 * completed again from here
		 */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5507
dedaf2b0
TH
5508/**
5509 * ata_qc_complete_multiple - Complete multiple qcs successfully
5510 * @ap: port in question
5511 * @qc_active: new qc_active mask
5512 * @finish_qc: LLDD callback invoked before completing a qc
5513 *
5514 * Complete in-flight commands. This functions is meant to be
5515 * called from low-level driver's interrupt routine to complete
5516 * requests normally. ap->qc_active and @qc_active is compared
5517 * and commands are completed accordingly.
5518 *
5519 * LOCKING:
cca3974e 5520 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5521 *
5522 * RETURNS:
5523 * Number of completed commands on success, -errno otherwise.
5524 */
5525int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5526 void (*finish_qc)(struct ata_queued_cmd *))
5527{
5528 int nr_done = 0;
5529 u32 done_mask;
5530 int i;
5531
5532 done_mask = ap->qc_active ^ qc_active;
5533
5534 if (unlikely(done_mask & qc_active)) {
5535 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5536 "(%08x->%08x)\n", ap->qc_active, qc_active);
5537 return -EINVAL;
5538 }
5539
5540 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5541 struct ata_queued_cmd *qc;
5542
5543 if (!(done_mask & (1 << i)))
5544 continue;
5545
5546 if ((qc = ata_qc_from_tag(ap, i))) {
5547 if (finish_qc)
5548 finish_qc(qc);
5549 ata_qc_complete(qc);
5550 nr_done++;
5551 }
5552 }
5553
5554 return nr_done;
5555}
5556
1da177e4
LT
5557static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5558{
5559 struct ata_port *ap = qc->ap;
5560
5561 switch (qc->tf.protocol) {
3dc1d881 5562 case ATA_PROT_NCQ:
1da177e4
LT
5563 case ATA_PROT_DMA:
5564 case ATA_PROT_ATAPI_DMA:
5565 return 1;
5566
5567 case ATA_PROT_ATAPI:
5568 case ATA_PROT_PIO:
1da177e4
LT
5569 if (ap->flags & ATA_FLAG_PIO_DMA)
5570 return 1;
5571
5572 /* fall through */
5573
5574 default:
5575 return 0;
5576 }
5577
5578 /* never reached */
5579}
5580
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* account the command on its link before touching hardware */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed - undo the DMAMAP flag and fail the qc */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5645
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polling: completion status is picked up by ata_pio_task() */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5777
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus - does not clear INTRQ, cheap pre-check */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if a DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5879
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only service active, interrupt-driven commands;
			 * polled qcs are handled by ata_pio_task()
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
5924
34bf2170
TH
5925/**
5926 * sata_scr_valid - test whether SCRs are accessible
936fd732 5927 * @link: ATA link to test SCR accessibility for
34bf2170 5928 *
936fd732 5929 * Test whether SCRs are accessible for @link.
34bf2170
TH
5930 *
5931 * LOCKING:
5932 * None.
5933 *
5934 * RETURNS:
5935 * 1 if SCRs are accessible, 0 otherwise.
5936 */
936fd732 5937int sata_scr_valid(struct ata_link *link)
34bf2170 5938{
936fd732
TH
5939 struct ata_port *ap = link->ap;
5940
a16abc0b 5941 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5942}
5943
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	struct ata_port *ap = link->ap;

	/* sata_scr_valid() guarantees ->scr_read is non-NULL here */
	if (sata_scr_valid(link))
		return ap->ops->scr_read(ap, reg, val);
	return -EOPNOTSUPP;
}
5968
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;

	/* NOTE(review): sata_scr_valid() only checks ->scr_read; assumes
	 * LLDs providing scr_read always provide scr_write too — confirm.
	 */
	if (sata_scr_valid(link))
		return ap->ops->scr_write(ap, reg, val);
	return -EOPNOTSUPP;
}
5993
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;
	int rc;

	if (sata_scr_valid(link)) {
		rc = ap->ops->scr_write(ap, reg, val);
		/* read back the same register to flush the posted write;
		 * the value read into @val (a by-value copy) is discarded
		 */
		if (rc == 0)
			rc = ap->ops->scr_read(ap, reg, &val);
		return rc;
	}
	return -EOPNOTSUPP;
}
6022
6023/**
936fd732
TH
6024 * ata_link_online - test whether the given link is online
6025 * @link: ATA link to test
34bf2170 6026 *
936fd732
TH
6027 * Test whether @link is online. Note that this function returns
6028 * 0 if online status of @link cannot be obtained, so
6029 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6030 *
6031 * LOCKING:
6032 * None.
6033 *
6034 * RETURNS:
6035 * 1 if the port online status is available and online.
6036 */
936fd732 6037int ata_link_online(struct ata_link *link)
34bf2170
TH
6038{
6039 u32 sstatus;
6040
936fd732
TH
6041 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6042 (sstatus & 0xf) == 0x3)
34bf2170
TH
6043 return 1;
6044 return 0;
6045}
6046
6047/**
936fd732
TH
6048 * ata_link_offline - test whether the given link is offline
6049 * @link: ATA link to test
34bf2170 6050 *
936fd732
TH
6051 * Test whether @link is offline. Note that this function
6052 * returns 0 if offline status of @link cannot be obtained, so
6053 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6054 *
6055 * LOCKING:
6056 * None.
6057 *
6058 * RETURNS:
6059 * 1 if the port offline status is available and offline.
6060 */
936fd732 6061int ata_link_offline(struct ata_link *link)
34bf2170
TH
6062{
6063 u32 sstatus;
6064
936fd732
TH
6065 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6066 (sstatus & 0xf) != 0x3)
34bf2170
TH
6067 return 1;
6068 return 0;
6069}
0baab86b 6070
/* Issue a cache-flush command to @dev if it supports/needs one.
 * Returns 0 on success (or when no flush is needed), -EIO on failure.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	/* device doesn't support or doesn't need cache flushing */
	if (!ata_try_flush_cache(dev))
		return 0;

	/* pick 48-bit flush opcode when the device advertises it */
	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6096
6ffa01d8 6097#ifdef CONFIG_PM
/* Request a PM operation (@mesg) for all ports of @host via EH.
 * @action/@ehi_flags are merged into every link's eh_info.  When @wait
 * is non-zero, waits for EH to finish each port and returns the first
 * per-port error; otherwise returns 0 immediately after scheduling.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH stores its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6147
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/* wait == 1: suspend must complete before we return */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
6172
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: fire-and-forget, ports resume in parallel */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 6190#endif
500530f6 6191
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed allocation: freed automatically on detach,
	 * so no explicit cleanup on the error paths below
	 */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6222
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent prefix of ata_device */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* start with all transfer modes allowed; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6257
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6290
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SControl.SPD (bits 7:4): 0 means no limit configured */
	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6321
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;		/* ports share the host lock */
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;		/* assigned at registration time */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable: fastdrain expiry need not wake an idle CPU */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* pmp 0: this is the host link */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6381
/* devres release callback for an ATA host: stop all started ports and
 * the host itself, then free per-port resources.  Runs when the owning
 * device's resources are released.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* first pass: stop ports (only if the host was ever started) */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* second pass: release SCSI hosts and free port structures */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6415
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 keeps a NULL sentinel after the last port pointer
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6480
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out (NULL entry), keep reusing the
		 * last valid port_info for the remaining ports
		 */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port_ops becomes the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6530
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: already-started hosts are a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports started before the failure */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6584
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6605
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  (ata_host_alloc() NULL-terminates the
	 * ports array, so this loop is bounded.)
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;	/* NOTE(review): shadows outer rc; probe errors
			 * below are deliberately(?) not propagated */

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
6733
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessasry
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* devm-managed: the IRQ is released automatically with @host->dev */
	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6778
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without new-style EH skip straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6833
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}
6850
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	/* ATA taskfile registers live at fixed offsets from the
	 * command block base; error/feature and status/command share
	 * an offset (read vs write views of the same register)
	 */
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
6876
0baab86b 6877
374b1873
JG
6878#ifdef CONFIG_PCI
6879
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
6898
/* move to PCI subsystem */
/* Read a 1/2/4-byte PCI config register described by @bits, mask it,
 * and compare against the expected value.
 * Returns 1 on match, 0 on mismatch, -EINVAL for an unsupported width.
 */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	/* widen the read value into tmp regardless of register width */
	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
9b847548 6932
6ffa01d8 6933#ifdef CONFIG_PM
/* Save PCI state and power down @pdev.  D3hot is entered only for a
 * real suspend; other events (e.g. freeze) leave the power state alone.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
6942
/* Power up @pdev, restore its config space and re-enable it.
 * Returns 0 on success, -errno if the device can't be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* devres-managed enable, paired with driver lifetime */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6960
/* Default PCI suspend callback: quiesce the ATA host via EH first,
 * then power down the PCI device.  Returns 0 or -errno.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
6974
/* Default PCI resume callback: bring the PCI device back up, then
 * kick EH to resume the ATA host.  Returns 0 or -errno.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
6ffa01d8
TH
6985#endif /* CONFIG_PM */
6986
1da177e4
LT
6987#endif /* CONFIG_PCI */
6988
6989
1da177e4
LT
/* Module init: scale the probe timeout by HZ and create the two
 * libata workqueues (main + auxiliary single-threaded).
 */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		/* unwind the first workqueue on partial failure */
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
7006
/* Module exit: tear down both libata workqueues. */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7012
/* subsys_initcall: libata must be up before the drivers that use it */
subsys_initcall(ata_init);
module_exit(ata_exit);
7015
/* Rate limiting state: next time printing is allowed, and the lock
 * protecting it (callers may run concurrently in interrupt context).
 */
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

/* Return 1 if the caller may emit a message now, 0 if suppressed.
 * Allows at most one "yes" per HZ/5 jiffies (~5 per second).
 */
int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
7036
c22daff4
TH
7037/**
7038 * ata_wait_register - wait until register value changes
7039 * @reg: IO-mapped register
7040 * @mask: Mask to apply to read register value
7041 * @val: Wait condition
7042 * @interval_msec: polling interval in milliseconds
7043 * @timeout_msec: timeout in milliseconds
7044 *
7045 * Waiting for some bits of register to change is a common
7046 * operation for ATA controllers. This function reads 32bit LE
7047 * IO-mapped register @reg and tests for the following condition.
7048 *
7049 * (*@reg & mask) != val
7050 *
7051 * If the condition is met, it returns; otherwise, the process is
7052 * repeated after @interval_msec until timeout.
7053 *
7054 * LOCKING:
7055 * Kernel thread context (may sleep)
7056 *
7057 * RETURNS:
7058 * The final register value.
7059 */
7060u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7061 unsigned long interval_msec,
7062 unsigned long timeout_msec)
7063{
7064 unsigned long timeout;
7065 u32 tmp;
7066
7067 tmp = ioread32(reg);
7068
7069 /* Calculate timeout _after_ the first read to make sure
7070 * preceding writes reach the controller before starting to
7071 * eat away the timeout.
7072 */
7073 timeout = jiffies + (timeout_msec * HZ) / 1000;
7074
7075 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7076 msleep(interval_msec);
7077 tmp = ioread32(reg);
7078 }
7079
7080 return tmp;
7081}
7082
dd5b06c4
TH
/*
 * Dummy port_ops - stub callbacks for ports with no attached
 * hardware (e.g. disabled or dummy ports).
 */
/* No-op callbacks of the various signatures port_ops requires. */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* Always report "device ready" so pollers terminate immediately. */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* Fail every command issued to a dummy port with a system error. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7099
/*
 * Port operations table for dummy ports: every hook is wired to a
 * no-op stub, status checks always report ready, and qc_issue fails
 * every command with AC_ERR_SYSTEM.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
7114
21b0ad4f
TH
/* Port info wrapper for drivers that describe ports via ata_port_info. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops	= &ata_dummy_port_ops,
};
7118
1da177e4
LT
7119/*
7120 * libata is essentially a library of internal helper functions for
7121 * low-level ATA host controller drivers. As such, the API/ABI is
7122 * likely to change as new drivers are added and updated.
7123 * Do not depend on ABI/API stability.
7124 */
7125
e9c83914
TH
7126EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7127EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7128EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7129EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7130EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7131EXPORT_SYMBOL_GPL(ata_std_bios_param);
7132EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7133EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7134EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7135EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7136EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7137EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7138EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7139EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7140EXPORT_SYMBOL_GPL(ata_sg_init);
7141EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7142EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7143EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7144EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7145EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7146EXPORT_SYMBOL_GPL(ata_tf_load);
7147EXPORT_SYMBOL_GPL(ata_tf_read);
7148EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7149EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7150EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7151EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7152EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7153EXPORT_SYMBOL_GPL(ata_check_status);
7154EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7155EXPORT_SYMBOL_GPL(ata_exec_command);
7156EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7157EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7158EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7159EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7160EXPORT_SYMBOL_GPL(ata_data_xfer);
7161EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7162EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7163EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7164EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7165EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7166EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7167EXPORT_SYMBOL_GPL(ata_bmdma_start);
7168EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7169EXPORT_SYMBOL_GPL(ata_bmdma_status);
7170EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7171EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7172EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7173EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7174EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7175EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7176EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7177EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7178EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7179EXPORT_SYMBOL_GPL(sata_link_debounce);
7180EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4
LT
7181EXPORT_SYMBOL_GPL(sata_phy_reset);
7182EXPORT_SYMBOL_GPL(__sata_phy_reset);
7183EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7184EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7185EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7186EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7187EXPORT_SYMBOL_GPL(sata_std_hardreset);
7188EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7189EXPORT_SYMBOL_GPL(ata_dev_classify);
7190EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7191EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7192EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7193EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7194EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 7195EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7196EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7197EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7198EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7199EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7200EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7201EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7202EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7203EXPORT_SYMBOL_GPL(sata_scr_valid);
7204EXPORT_SYMBOL_GPL(sata_scr_read);
7205EXPORT_SYMBOL_GPL(sata_scr_write);
7206EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7207EXPORT_SYMBOL_GPL(ata_link_online);
7208EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7209#ifdef CONFIG_PM
cca3974e
JG
7210EXPORT_SYMBOL_GPL(ata_host_suspend);
7211EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7212#endif /* CONFIG_PM */
6a62a04d
TH
7213EXPORT_SYMBOL_GPL(ata_id_string);
7214EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 7215EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
1da177e4
LT
7216EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7217
1bc4ccff 7218EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
7219EXPORT_SYMBOL_GPL(ata_timing_compute);
7220EXPORT_SYMBOL_GPL(ata_timing_merge);
7221
1da177e4
LT
7222#ifdef CONFIG_PCI
7223EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7224EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7225EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7226EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7227EXPORT_SYMBOL_GPL(ata_pci_init_one);
7228EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7229#ifdef CONFIG_PM
500530f6
TH
7230EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7231EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7232EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7233EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7234#endif /* CONFIG_PM */
67951ade
AC
7235EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7236EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7237#endif /* CONFIG_PCI */
9b847548 7238
b64bbc39
TH
7239EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7240EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7241EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7242EXPORT_SYMBOL_GPL(ata_port_desc);
7243#ifdef CONFIG_PCI
7244EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7245#endif /* CONFIG_PCI */
ece1d636 7246EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 7247EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7248EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7249EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
7250EXPORT_SYMBOL_GPL(ata_port_freeze);
7251EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7252EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7253EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7254EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7255EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7256EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7257EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7258
7259EXPORT_SYMBOL_GPL(ata_cable_40wire);
7260EXPORT_SYMBOL_GPL(ata_cable_80wire);
7261EXPORT_SYMBOL_GPL(ata_cable_unknown);
7262EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 1.030906 seconds and 5 git commands to generate.