/*
 * libata: ATA_12/16 doesn't fall into ATAPI_MISC
 * drivers/ata/libata-core.c
 */
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
d7bb4cc7 72/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 76
3373efd8
TH
77static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
80static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
3373efd8 82static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 83static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 84
f3187195 85unsigned int ata_print_id = 1;
1da177e4
LT
86static struct workqueue_struct *ata_wq;
87
453b07ac
TH
88struct workqueue_struct *ata_aux_wq;
89
33267325
TH
90struct ata_force_param {
91 const char *name;
92 unsigned int cbl;
93 int spd_limit;
94 unsigned long xfer_mask;
95 unsigned int horkage_on;
96 unsigned int horkage_off;
97};
98
99struct ata_force_ent {
100 int port;
101 int device;
102 struct ata_force_param param;
103};
104
105static struct ata_force_ent *ata_force_tbl;
106static int ata_force_tbl_size;
107
108static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
109/* param_buf is thrown away after initialization, disallow read */
110module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
111MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
112
418dc1f5 113int atapi_enabled = 1;
1623c81e
JG
114module_param(atapi_enabled, int, 0444);
115MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
116
c5c61bda 117static int atapi_dmadir = 0;
95de719a
AL
118module_param(atapi_dmadir, int, 0444);
119MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
120
baf4fdfa
ML
121int atapi_passthru16 = 1;
122module_param(atapi_passthru16, int, 0444);
123MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
124
c3c013a2
JG
125int libata_fua = 0;
126module_param_named(fua, libata_fua, int, 0444);
127MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
128
2dcb407e 129static int ata_ignore_hpa;
1e999736
AC
130module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
131MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
132
b3a70601
AC
133static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
134module_param_named(dma, libata_dma_mask, int, 0444);
135MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
136
a8601e5f
AM
137static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
138module_param(ata_probe_timeout, int, 0444);
139MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
140
6ebe9d86 141int libata_noacpi = 0;
d7d0dad6 142module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 143MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 144
ae8d4ee7
AC
145int libata_allow_tpm = 0;
146module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
147MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
148
1da177e4
LT
149MODULE_AUTHOR("Jeff Garzik");
150MODULE_DESCRIPTION("Library module for ATA devices");
151MODULE_LICENSE("GPL");
152MODULE_VERSION(DRV_VERSION);
153
0baab86b 154
33267325
TH
155/**
156 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 157 * @ap: ATA port of interest
33267325
TH
158 *
159 * Force cable type according to libata.force and whine about it.
160 * The last entry which has matching port number is used, so it
161 * can be specified as part of device force parameters. For
162 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
163 * same effect.
164 *
165 * LOCKING:
166 * EH context.
167 */
168void ata_force_cbl(struct ata_port *ap)
169{
170 int i;
171
172 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
173 const struct ata_force_ent *fe = &ata_force_tbl[i];
174
175 if (fe->port != -1 && fe->port != ap->print_id)
176 continue;
177
178 if (fe->param.cbl == ATA_CBL_NONE)
179 continue;
180
181 ap->cbl = fe->param.cbl;
182 ata_port_printk(ap, KERN_NOTICE,
183 "FORCE: cable set to %s\n", fe->param.name);
184 return;
185 }
186}
187
188/**
189 * ata_force_spd_limit - force SATA spd limit according to libata.force
190 * @link: ATA link of interest
191 *
192 * Force SATA spd limit according to libata.force and whine about
193 * it. When only the port part is specified (e.g. 1:), the limit
194 * applies to all links connected to both the host link and all
195 * fan-out ports connected via PMP. If the device part is
196 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
197 * link not the host link. Device number 15 always points to the
198 * host link whether PMP is attached or not.
199 *
200 * LOCKING:
201 * EH context.
202 */
203static void ata_force_spd_limit(struct ata_link *link)
204{
205 int linkno, i;
206
207 if (ata_is_host_link(link))
208 linkno = 15;
209 else
210 linkno = link->pmp;
211
212 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
213 const struct ata_force_ent *fe = &ata_force_tbl[i];
214
215 if (fe->port != -1 && fe->port != link->ap->print_id)
216 continue;
217
218 if (fe->device != -1 && fe->device != linkno)
219 continue;
220
221 if (!fe->param.spd_limit)
222 continue;
223
224 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
225 ata_link_printk(link, KERN_NOTICE,
226 "FORCE: PHY spd limit set to %s\n", fe->param.name);
227 return;
228 }
229}
230
231/**
232 * ata_force_xfermask - force xfermask according to libata.force
233 * @dev: ATA device of interest
234 *
235 * Force xfer_mask according to libata.force and whine about it.
236 * For consistency with link selection, device number 15 selects
237 * the first device connected to the host link.
238 *
239 * LOCKING:
240 * EH context.
241 */
242static void ata_force_xfermask(struct ata_device *dev)
243{
244 int devno = dev->link->pmp + dev->devno;
245 int alt_devno = devno;
246 int i;
247
248 /* allow n.15 for the first device attached to host port */
249 if (ata_is_host_link(dev->link) && devno == 0)
250 alt_devno = 15;
251
252 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
253 const struct ata_force_ent *fe = &ata_force_tbl[i];
254 unsigned long pio_mask, mwdma_mask, udma_mask;
255
256 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
257 continue;
258
259 if (fe->device != -1 && fe->device != devno &&
260 fe->device != alt_devno)
261 continue;
262
263 if (!fe->param.xfer_mask)
264 continue;
265
266 ata_unpack_xfermask(fe->param.xfer_mask,
267 &pio_mask, &mwdma_mask, &udma_mask);
268 if (udma_mask)
269 dev->udma_mask = udma_mask;
270 else if (mwdma_mask) {
271 dev->udma_mask = 0;
272 dev->mwdma_mask = mwdma_mask;
273 } else {
274 dev->udma_mask = 0;
275 dev->mwdma_mask = 0;
276 dev->pio_mask = pio_mask;
277 }
278
279 ata_dev_printk(dev, KERN_NOTICE,
280 "FORCE: xfer_mask set to %s\n", fe->param.name);
281 return;
282 }
283}
284
285/**
286 * ata_force_horkage - force horkage according to libata.force
287 * @dev: ATA device of interest
288 *
289 * Force horkage according to libata.force and whine about it.
290 * For consistency with link selection, device number 15 selects
291 * the first device connected to the host link.
292 *
293 * LOCKING:
294 * EH context.
295 */
296static void ata_force_horkage(struct ata_device *dev)
297{
298 int devno = dev->link->pmp + dev->devno;
299 int alt_devno = devno;
300 int i;
301
302 /* allow n.15 for the first device attached to host port */
303 if (ata_is_host_link(dev->link) && devno == 0)
304 alt_devno = 15;
305
306 for (i = 0; i < ata_force_tbl_size; i++) {
307 const struct ata_force_ent *fe = &ata_force_tbl[i];
308
309 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
310 continue;
311
312 if (fe->device != -1 && fe->device != devno &&
313 fe->device != alt_devno)
314 continue;
315
316 if (!(~dev->horkage & fe->param.horkage_on) &&
317 !(dev->horkage & fe->param.horkage_off))
318 continue;
319
320 dev->horkage |= fe->param.horkage_on;
321 dev->horkage &= ~fe->param.horkage_off;
322
323 ata_dev_printk(dev, KERN_NOTICE,
324 "FORCE: horkage modified (%s)\n", fe->param.name);
325 }
326}
327
436d34b3
TH
328/**
329 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
330 * @opcode: SCSI opcode
331 *
332 * Determine ATAPI command type from @opcode.
333 *
334 * LOCKING:
335 * None.
336 *
337 * RETURNS:
338 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
339 */
340int atapi_cmd_type(u8 opcode)
341{
342 switch (opcode) {
343 case GPCMD_READ_10:
344 case GPCMD_READ_12:
345 return ATAPI_READ;
346
347 case GPCMD_WRITE_10:
348 case GPCMD_WRITE_12:
349 case GPCMD_WRITE_AND_VERIFY_10:
350 return ATAPI_WRITE;
351
352 case GPCMD_READ_CD:
353 case GPCMD_READ_CD_MSF:
354 return ATAPI_READ_CD;
355
e52dcc48
TH
356 case ATA_16:
357 case ATA_12:
358 if (atapi_passthru16)
359 return ATAPI_PASS_THRU;
360 /* fall thru */
436d34b3
TH
361 default:
362 return ATAPI_MISC;
363 }
364}
365
1da177e4
LT
366/**
367 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
368 * @tf: Taskfile to convert
1da177e4 369 * @pmp: Port multiplier port
9977126c
TH
370 * @is_cmd: This FIS is for command
371 * @fis: Buffer into which data will output
1da177e4
LT
372 *
373 * Converts a standard ATA taskfile to a Serial ATA
374 * FIS structure (Register - Host to Device).
375 *
376 * LOCKING:
377 * Inherited from caller.
378 */
9977126c 379void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 380{
9977126c
TH
381 fis[0] = 0x27; /* Register - Host to Device FIS */
382 fis[1] = pmp & 0xf; /* Port multiplier number*/
383 if (is_cmd)
384 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
385
1da177e4
LT
386 fis[2] = tf->command;
387 fis[3] = tf->feature;
388
389 fis[4] = tf->lbal;
390 fis[5] = tf->lbam;
391 fis[6] = tf->lbah;
392 fis[7] = tf->device;
393
394 fis[8] = tf->hob_lbal;
395 fis[9] = tf->hob_lbam;
396 fis[10] = tf->hob_lbah;
397 fis[11] = tf->hob_feature;
398
399 fis[12] = tf->nsect;
400 fis[13] = tf->hob_nsect;
401 fis[14] = 0;
402 fis[15] = tf->ctl;
403
404 fis[16] = 0;
405 fis[17] = 0;
406 fis[18] = 0;
407 fis[19] = 0;
408}
409
410/**
411 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
412 * @fis: Buffer from which data will be input
413 * @tf: Taskfile to output
414 *
e12a1be6 415 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
416 *
417 * LOCKING:
418 * Inherited from caller.
419 */
420
057ace5e 421void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
422{
423 tf->command = fis[2]; /* status */
424 tf->feature = fis[3]; /* error */
425
426 tf->lbal = fis[4];
427 tf->lbam = fis[5];
428 tf->lbah = fis[6];
429 tf->device = fis[7];
430
431 tf->hob_lbal = fis[8];
432 tf->hob_lbam = fis[9];
433 tf->hob_lbah = fis[10];
434
435 tf->nsect = fis[12];
436 tf->hob_nsect = fis[13];
437}
438
8cbd6df1
AL
439static const u8 ata_rw_cmds[] = {
440 /* pio multi */
441 ATA_CMD_READ_MULTI,
442 ATA_CMD_WRITE_MULTI,
443 ATA_CMD_READ_MULTI_EXT,
444 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
445 0,
446 0,
447 0,
448 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
449 /* pio */
450 ATA_CMD_PIO_READ,
451 ATA_CMD_PIO_WRITE,
452 ATA_CMD_PIO_READ_EXT,
453 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
454 0,
455 0,
456 0,
457 0,
8cbd6df1
AL
458 /* dma */
459 ATA_CMD_READ,
460 ATA_CMD_WRITE,
461 ATA_CMD_READ_EXT,
9a3dccc4
TH
462 ATA_CMD_WRITE_EXT,
463 0,
464 0,
465 0,
466 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 467};
1da177e4
LT
468
469/**
8cbd6df1 470 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
471 * @tf: command to examine and configure
472 * @dev: device tf belongs to
1da177e4 473 *
2e9edbf8 474 * Examine the device configuration and tf->flags to calculate
8cbd6df1 475 * the proper read/write commands and protocol to use.
1da177e4
LT
476 *
477 * LOCKING:
478 * caller.
479 */
bd056d7e 480static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 481{
9a3dccc4 482 u8 cmd;
1da177e4 483
9a3dccc4 484 int index, fua, lba48, write;
2e9edbf8 485
9a3dccc4 486 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
487 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
488 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 489
8cbd6df1
AL
490 if (dev->flags & ATA_DFLAG_PIO) {
491 tf->protocol = ATA_PROT_PIO;
9a3dccc4 492 index = dev->multi_count ? 0 : 8;
9af5c9c9 493 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
494 /* Unable to use DMA due to host limitation */
495 tf->protocol = ATA_PROT_PIO;
0565c26d 496 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
497 } else {
498 tf->protocol = ATA_PROT_DMA;
9a3dccc4 499 index = 16;
8cbd6df1 500 }
1da177e4 501
9a3dccc4
TH
502 cmd = ata_rw_cmds[index + fua + lba48 + write];
503 if (cmd) {
504 tf->command = cmd;
505 return 0;
506 }
507 return -1;
1da177e4
LT
508}
509
35b649fe
TH
510/**
511 * ata_tf_read_block - Read block address from ATA taskfile
512 * @tf: ATA taskfile of interest
513 * @dev: ATA device @tf belongs to
514 *
515 * LOCKING:
516 * None.
517 *
518 * Read block address from @tf. This function can handle all
519 * three address formats - LBA, LBA48 and CHS. tf->protocol and
520 * flags select the address format to use.
521 *
522 * RETURNS:
523 * Block address read from @tf.
524 */
525u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
526{
527 u64 block = 0;
528
529 if (tf->flags & ATA_TFLAG_LBA) {
530 if (tf->flags & ATA_TFLAG_LBA48) {
531 block |= (u64)tf->hob_lbah << 40;
532 block |= (u64)tf->hob_lbam << 32;
533 block |= tf->hob_lbal << 24;
534 } else
535 block |= (tf->device & 0xf) << 24;
536
537 block |= tf->lbah << 16;
538 block |= tf->lbam << 8;
539 block |= tf->lbal;
540 } else {
541 u32 cyl, head, sect;
542
543 cyl = tf->lbam | (tf->lbah << 8);
544 head = tf->device & 0xf;
545 sect = tf->lbal;
546
547 block = (cyl * dev->heads + head) * dev->sectors + sect;
548 }
549
550 return block;
551}
552
bd056d7e
TH
553/**
554 * ata_build_rw_tf - Build ATA taskfile for given read/write request
555 * @tf: Target ATA taskfile
556 * @dev: ATA device @tf belongs to
557 * @block: Block address
558 * @n_block: Number of blocks
559 * @tf_flags: RW/FUA etc...
560 * @tag: tag
561 *
562 * LOCKING:
563 * None.
564 *
565 * Build ATA taskfile @tf for read/write request described by
566 * @block, @n_block, @tf_flags and @tag on @dev.
567 *
568 * RETURNS:
569 *
570 * 0 on success, -ERANGE if the request is too large for @dev,
571 * -EINVAL if the request is invalid.
572 */
573int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
574 u64 block, u32 n_block, unsigned int tf_flags,
575 unsigned int tag)
576{
577 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
578 tf->flags |= tf_flags;
579
6d1245bf 580 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
581 /* yay, NCQ */
582 if (!lba_48_ok(block, n_block))
583 return -ERANGE;
584
585 tf->protocol = ATA_PROT_NCQ;
586 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
587
588 if (tf->flags & ATA_TFLAG_WRITE)
589 tf->command = ATA_CMD_FPDMA_WRITE;
590 else
591 tf->command = ATA_CMD_FPDMA_READ;
592
593 tf->nsect = tag << 3;
594 tf->hob_feature = (n_block >> 8) & 0xff;
595 tf->feature = n_block & 0xff;
596
597 tf->hob_lbah = (block >> 40) & 0xff;
598 tf->hob_lbam = (block >> 32) & 0xff;
599 tf->hob_lbal = (block >> 24) & 0xff;
600 tf->lbah = (block >> 16) & 0xff;
601 tf->lbam = (block >> 8) & 0xff;
602 tf->lbal = block & 0xff;
603
604 tf->device = 1 << 6;
605 if (tf->flags & ATA_TFLAG_FUA)
606 tf->device |= 1 << 7;
607 } else if (dev->flags & ATA_DFLAG_LBA) {
608 tf->flags |= ATA_TFLAG_LBA;
609
610 if (lba_28_ok(block, n_block)) {
611 /* use LBA28 */
612 tf->device |= (block >> 24) & 0xf;
613 } else if (lba_48_ok(block, n_block)) {
614 if (!(dev->flags & ATA_DFLAG_LBA48))
615 return -ERANGE;
616
617 /* use LBA48 */
618 tf->flags |= ATA_TFLAG_LBA48;
619
620 tf->hob_nsect = (n_block >> 8) & 0xff;
621
622 tf->hob_lbah = (block >> 40) & 0xff;
623 tf->hob_lbam = (block >> 32) & 0xff;
624 tf->hob_lbal = (block >> 24) & 0xff;
625 } else
626 /* request too large even for LBA48 */
627 return -ERANGE;
628
629 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
630 return -EINVAL;
631
632 tf->nsect = n_block & 0xff;
633
634 tf->lbah = (block >> 16) & 0xff;
635 tf->lbam = (block >> 8) & 0xff;
636 tf->lbal = block & 0xff;
637
638 tf->device |= ATA_LBA;
639 } else {
640 /* CHS */
641 u32 sect, head, cyl, track;
642
643 /* The request -may- be too large for CHS addressing. */
644 if (!lba_28_ok(block, n_block))
645 return -ERANGE;
646
647 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
648 return -EINVAL;
649
650 /* Convert LBA to CHS */
651 track = (u32)block / dev->sectors;
652 cyl = track / dev->heads;
653 head = track % dev->heads;
654 sect = (u32)block % dev->sectors + 1;
655
656 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
657 (u32)block, track, cyl, head, sect);
658
659 /* Check whether the converted CHS can fit.
660 Cylinder: 0-65535
661 Head: 0-15
662 Sector: 1-255*/
663 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
664 return -ERANGE;
665
666 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
667 tf->lbal = sect;
668 tf->lbam = cyl;
669 tf->lbah = cyl >> 8;
670 tf->device |= head;
671 }
672
673 return 0;
674}
675
cb95d562
TH
676/**
677 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
678 * @pio_mask: pio_mask
679 * @mwdma_mask: mwdma_mask
680 * @udma_mask: udma_mask
681 *
682 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
683 * unsigned int xfer_mask.
684 *
685 * LOCKING:
686 * None.
687 *
688 * RETURNS:
689 * Packed xfer_mask.
690 */
7dc951ae
TH
691unsigned long ata_pack_xfermask(unsigned long pio_mask,
692 unsigned long mwdma_mask,
693 unsigned long udma_mask)
cb95d562
TH
694{
695 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
696 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
697 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
698}
699
c0489e4e
TH
700/**
701 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
702 * @xfer_mask: xfer_mask to unpack
703 * @pio_mask: resulting pio_mask
704 * @mwdma_mask: resulting mwdma_mask
705 * @udma_mask: resulting udma_mask
706 *
707 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
708 * Any NULL distination masks will be ignored.
709 */
7dc951ae
TH
710void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
711 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
712{
713 if (pio_mask)
714 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
715 if (mwdma_mask)
716 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
717 if (udma_mask)
718 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
719}
720
cb95d562 721static const struct ata_xfer_ent {
be9a50c8 722 int shift, bits;
cb95d562
TH
723 u8 base;
724} ata_xfer_tbl[] = {
70cd071e
TH
725 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
726 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
727 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
728 { -1, },
729};
730
731/**
732 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
733 * @xfer_mask: xfer_mask of interest
734 *
735 * Return matching XFER_* value for @xfer_mask. Only the highest
736 * bit of @xfer_mask is considered.
737 *
738 * LOCKING:
739 * None.
740 *
741 * RETURNS:
70cd071e 742 * Matching XFER_* value, 0xff if no match found.
cb95d562 743 */
7dc951ae 744u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
745{
746 int highbit = fls(xfer_mask) - 1;
747 const struct ata_xfer_ent *ent;
748
749 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
750 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
751 return ent->base + highbit - ent->shift;
70cd071e 752 return 0xff;
cb95d562
TH
753}
754
755/**
756 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
757 * @xfer_mode: XFER_* of interest
758 *
759 * Return matching xfer_mask for @xfer_mode.
760 *
761 * LOCKING:
762 * None.
763 *
764 * RETURNS:
765 * Matching xfer_mask, 0 if no match found.
766 */
7dc951ae 767unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
768{
769 const struct ata_xfer_ent *ent;
770
771 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
772 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
773 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
774 & ~((1 << ent->shift) - 1);
cb95d562
TH
775 return 0;
776}
777
778/**
779 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
780 * @xfer_mode: XFER_* of interest
781 *
782 * Return matching xfer_shift for @xfer_mode.
783 *
784 * LOCKING:
785 * None.
786 *
787 * RETURNS:
788 * Matching xfer_shift, -1 if no match found.
789 */
7dc951ae 790int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
791{
792 const struct ata_xfer_ent *ent;
793
794 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
795 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
796 return ent->shift;
797 return -1;
798}
799
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* one name per xfer_mask bit position, lowest bit first */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
845
/* Map a SATA PHY speed number (1-based) to a human-readable string;
 * 0 or out-of-range speeds yield "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
857
3373efd8 858void ata_dev_disable(struct ata_device *dev)
0b8efb0a 859{
09d7f9b0 860 if (ata_dev_enabled(dev)) {
9af5c9c9 861 if (ata_msg_drv(dev->link->ap))
09d7f9b0 862 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 863 ata_acpi_on_disable(dev);
4ae72a1e
TH
864 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
865 ATA_DNXFER_QUIET);
0b8efb0a
TH
866 dev->class++;
867 }
868}
869
ca77329f
KCA
870static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
871{
872 struct ata_link *link = dev->link;
873 struct ata_port *ap = link->ap;
874 u32 scontrol;
875 unsigned int err_mask;
876 int rc;
877
878 /*
879 * disallow DIPM for drivers which haven't set
880 * ATA_FLAG_IPM. This is because when DIPM is enabled,
881 * phy ready will be set in the interrupt status on
882 * state changes, which will cause some drivers to
883 * think there are errors - additionally drivers will
884 * need to disable hot plug.
885 */
886 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
887 ap->pm_policy = NOT_AVAILABLE;
888 return -EINVAL;
889 }
890
891 /*
892 * For DIPM, we will only enable it for the
893 * min_power setting.
894 *
895 * Why? Because Disks are too stupid to know that
896 * If the host rejects a request to go to SLUMBER
897 * they should retry at PARTIAL, and instead it
898 * just would give up. So, for medium_power to
899 * work at all, we need to only allow HIPM.
900 */
901 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
902 if (rc)
903 return rc;
904
905 switch (policy) {
906 case MIN_POWER:
907 /* no restrictions on IPM transitions */
908 scontrol &= ~(0x3 << 8);
909 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
910 if (rc)
911 return rc;
912
913 /* enable DIPM */
914 if (dev->flags & ATA_DFLAG_DIPM)
915 err_mask = ata_dev_set_feature(dev,
916 SETFEATURES_SATA_ENABLE, SATA_DIPM);
917 break;
918 case MEDIUM_POWER:
919 /* allow IPM to PARTIAL */
920 scontrol &= ~(0x1 << 8);
921 scontrol |= (0x2 << 8);
922 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
923 if (rc)
924 return rc;
925
f5456b63
KCA
926 /*
927 * we don't have to disable DIPM since IPM flags
928 * disallow transitions to SLUMBER, which effectively
929 * disable DIPM if it does not support PARTIAL
930 */
ca77329f
KCA
931 break;
932 case NOT_AVAILABLE:
933 case MAX_PERFORMANCE:
934 /* disable all IPM transitions */
935 scontrol |= (0x3 << 8);
936 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
937 if (rc)
938 return rc;
939
f5456b63
KCA
940 /*
941 * we don't have to disable DIPM since IPM flags
942 * disallow all transitions which effectively
943 * disable DIPM anyway.
944 */
ca77329f
KCA
945 break;
946 }
947
948 /* FIXME: handle SET FEATURES failure */
949 (void) err_mask;
950
951 return 0;
952}
953
954/**
955 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
956 * @dev: device to enable power management
957 * @policy: the link power management policy
ca77329f
KCA
958 *
959 * Enable SATA Interface power management. This will enable
960 * Device Interface Power Management (DIPM) for min_power
961 * policy, and then call driver specific callbacks for
962 * enabling Host Initiated Power management.
963 *
964 * Locking: Caller.
965 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
966 */
967void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
968{
969 int rc = 0;
970 struct ata_port *ap = dev->link->ap;
971
972 /* set HIPM first, then DIPM */
973 if (ap->ops->enable_pm)
974 rc = ap->ops->enable_pm(ap, policy);
975 if (rc)
976 goto enable_pm_out;
977 rc = ata_dev_set_dipm(dev, policy);
978
979enable_pm_out:
980 if (rc)
981 ap->pm_policy = MAX_PERFORMANCE;
982 else
983 ap->pm_policy = policy;
984 return /* rc */; /* hopefully we can use 'rc' eventually */
985}
986
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* turn off DIPM on the device first, then HIPM on the host */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1009
1010void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1011{
1012 ap->pm_policy = policy;
3ec25ebd 1013 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1014 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1015 ata_port_schedule_eh(ap);
1016}
1017
#ifdef CONFIG_PM
/* Synchronously disable link PM on every device of every port of
 * @host (used around suspend, despite the "enable" name which refers
 * to entering the host-wide LPM-enabled state machine).
 */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

/* Re-apply each port's stored PM policy by scheduling EH on it. */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1045
1046
1da177e4 1047/**
0d5ff566 1048 * ata_devchk - PATA device presence detection
1da177e4
LT
1049 * @ap: ATA channel to examine
1050 * @device: Device to examine (starting at zero)
1051 *
1052 * This technique was originally described in
1053 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1054 * later found its way into the ATA/ATAPI spec.
1055 *
1056 * Write a pattern to the ATA shadow registers,
1057 * and if a device is present, it will respond by
1058 * correctly storing and echoing back the
1059 * ATA shadow register contents.
1060 *
1061 * LOCKING:
1062 * caller.
1063 */
1064
0d5ff566 1065static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
1066{
1067 struct ata_ioports *ioaddr = &ap->ioaddr;
1068 u8 nsect, lbal;
1069
1070 ap->ops->dev_select(ap, device);
1071
0d5ff566
TH
1072 iowrite8(0x55, ioaddr->nsect_addr);
1073 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1074
0d5ff566
TH
1075 iowrite8(0xaa, ioaddr->nsect_addr);
1076 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 1077
0d5ff566
TH
1078 iowrite8(0x55, ioaddr->nsect_addr);
1079 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1080
0d5ff566
TH
1081 nsect = ioread8(ioaddr->nsect_addr);
1082 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
1083
1084 if ((nsect == 0x55) && (lbal == 0xaa))
1085 return 1; /* we found a device */
1086
1087 return 0; /* nothing found */
1088}
1089
1da177e4
LT
1090/**
1091 * ata_dev_classify - determine device type based on ATA-spec signature
1092 * @tf: ATA taskfile register set for device to be identified
1093 *
1094 * Determine from taskfile register contents whether a device is
1095 * ATA or ATAPI, as per "Signature and persistence" section
1096 * of ATA/PI spec (volume 1, sect 5.14).
1097 *
1098 * LOCKING:
1099 * None.
1100 *
1101 * RETURNS:
633273a3
TH
1102 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
1da177e4 1104 */
057ace5e 1105unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1106{
1107 /* Apple's open source Darwin code hints that some devices only
1108 * put a proper signature into the LBA mid/high registers,
1109 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1110 *
1111 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1112 * signatures for ATA and ATAPI devices attached on SerialATA,
1113 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1114 * spec has never mentioned about using different signatures
1115 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1116 * Multiplier specification began to use 0x69/0x96 to identify
1117 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1118 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1119 * 0x69/0x96 shortly and described them as reserved for
1120 * SerialATA.
1121 *
1122 * We follow the current spec and consider that 0x69/0x96
1123 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1124 */
633273a3 1125 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1126 DPRINTK("found ATA device by sig\n");
1127 return ATA_DEV_ATA;
1128 }
1129
633273a3 1130 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1131 DPRINTK("found ATAPI device by sig\n");
1132 return ATA_DEV_ATAPI;
1133 }
1134
633273a3
TH
1135 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1136 DPRINTK("found PMP device by sig\n");
1137 return ATA_DEV_PMP;
1138 }
1139
1140 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1141 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1142 return ATA_DEV_SEMB_UNSUP; /* not yet */
1143 }
1144
1da177e4
LT
1145 DPRINTK("unknown device\n");
1146 return ATA_DEV_UNKNOWN;
1147}
1148
1149/**
1150 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
1151 * @dev: ATA device to classify (starting at zero)
1152 * @present: device seems present
b4dc7623 1153 * @r_err: Value of error register on completion
1da177e4
LT
1154 *
1155 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1156 * an ATA/ATAPI-defined set of values is placed in the ATA
1157 * shadow registers, indicating the results of device detection
1158 * and diagnostics.
1159 *
1160 * Select the ATA device, and read the values from the ATA shadow
1161 * registers. Then parse according to the Error register value,
1162 * and the spec-defined values examined by ata_dev_classify().
1163 *
1164 * LOCKING:
1165 * caller.
b4dc7623
TH
1166 *
1167 * RETURNS:
1168 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 1169 */
3f19859e
TH
1170unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
1171 u8 *r_err)
1da177e4 1172{
3f19859e 1173 struct ata_port *ap = dev->link->ap;
1da177e4
LT
1174 struct ata_taskfile tf;
1175 unsigned int class;
1176 u8 err;
1177
3f19859e 1178 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
1179
1180 memset(&tf, 0, sizeof(tf));
1181
1da177e4 1182 ap->ops->tf_read(ap, &tf);
0169e284 1183 err = tf.feature;
b4dc7623
TH
1184 if (r_err)
1185 *r_err = err;
1da177e4 1186
c5038fc0
AC
1187 /* see if device passed diags: continue and warn later */
1188 if (err == 0)
93590859 1189 /* diagnostic fail : do nothing _YET_ */
3f19859e 1190 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 1191 else if (err == 1)
1da177e4 1192 /* do nothing */ ;
3f19859e 1193 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
1194 /* do nothing */ ;
1195 else
b4dc7623 1196 return ATA_DEV_NONE;
1da177e4 1197
b4dc7623 1198 /* determine if device is ATA or ATAPI */
1da177e4 1199 class = ata_dev_classify(&tf);
b4dc7623 1200
d7fbee05
TH
1201 if (class == ATA_DEV_UNKNOWN) {
1202 /* If the device failed diagnostic, it's likely to
1203 * have reported incorrect device signature too.
1204 * Assume ATA device if the device seems present but
1205 * device signature is invalid with diagnostic
1206 * failure.
1207 */
1208 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1209 class = ATA_DEV_ATA;
1210 else
1211 class = ATA_DEV_NONE;
1212 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
1213 class = ATA_DEV_NONE;
1214
b4dc7623 1215 return class;
1da177e4
LT
1216}
1217
1218/**
6a62a04d 1219 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1220 * @id: IDENTIFY DEVICE results we will examine
1221 * @s: string into which data is output
1222 * @ofs: offset into identify device page
1223 * @len: length of string to return. must be an even number.
1224 *
1225 * The strings in the IDENTIFY DEVICE page are broken up into
1226 * 16-bit chunks. Run through the string, and output each
1227 * 8-bit chunk linearly, regardless of platform.
1228 *
1229 * LOCKING:
1230 * caller.
1231 */
1232
6a62a04d
TH
1233void ata_id_string(const u16 *id, unsigned char *s,
1234 unsigned int ofs, unsigned int len)
1da177e4
LT
1235{
1236 unsigned int c;
1237
1238 while (len > 0) {
1239 c = id[ofs] >> 8;
1240 *s = c;
1241 s++;
1242
1243 c = id[ofs] & 0xff;
1244 *s = c;
1245 s++;
1246
1247 ofs++;
1248 len -= 2;
1249 }
1250}
1251
0e949ff3 1252/**
6a62a04d 1253 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1254 * @id: IDENTIFY DEVICE results we will examine
1255 * @s: string into which data is output
1256 * @ofs: offset into identify device page
1257 * @len: length of string to return. must be an odd number.
1258 *
6a62a04d 1259 * This function is identical to ata_id_string except that it
0e949ff3
TH
1260 * trims trailing spaces and terminates the resulting string with
1261 * null. @len must be actual maximum length (even number) + 1.
1262 *
1263 * LOCKING:
1264 * caller.
1265 */
6a62a04d
TH
1266void ata_id_c_string(const u16 *id, unsigned char *s,
1267 unsigned int ofs, unsigned int len)
0e949ff3
TH
1268{
1269 unsigned char *p;
1270
1271 WARN_ON(!(len & 1));
1272
6a62a04d 1273 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1274
1275 p = s + strnlen(s, len - 1);
1276 while (p > s && p[-1] == ' ')
1277 p--;
1278 *p = '\0';
1279}
0baab86b 1280
db6f8759
TH
1281static u64 ata_id_n_sectors(const u16 *id)
1282{
1283 if (ata_id_has_lba(id)) {
1284 if (ata_id_has_lba48(id))
1285 return ata_id_u64(id, 100);
1286 else
1287 return ata_id_u32(id, 60);
1288 } else {
1289 if (ata_id_current_chs_valid(id))
1290 return ata_id_u32(id, 57);
1291 else
1292 return id[1] * id[3] * id[6];
1293 }
1294}
1295
1e999736
AC
1296static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1297{
1298 u64 sectors = 0;
1299
1300 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1301 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1302 sectors |= (tf->hob_lbal & 0xff) << 24;
1303 sectors |= (tf->lbah & 0xff) << 16;
1304 sectors |= (tf->lbam & 0xff) << 8;
1305 sectors |= (tf->lbal & 0xff);
1306
1307 return ++sectors;
1308}
1309
1310static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1311{
1312 u64 sectors = 0;
1313
1314 sectors |= (tf->device & 0x0f) << 24;
1315 sectors |= (tf->lbah & 0xff) << 16;
1316 sectors |= (tf->lbam & 0xff) << 8;
1317 sectors |= (tf->lbal & 0xff);
1318
1319 return ++sectors;
1320}
1321
1322/**
c728a914
TH
1323 * ata_read_native_max_address - Read native max address
1324 * @dev: target device
1325 * @max_sectors: out parameter for the result native max address
1e999736 1326 *
c728a914
TH
1327 * Perform an LBA48 or LBA28 native size query upon the device in
1328 * question.
1e999736 1329 *
c728a914
TH
1330 * RETURNS:
1331 * 0 on success, -EACCES if command is aborted by the drive.
1332 * -EIO on other errors.
1e999736 1333 */
c728a914 1334static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1335{
c728a914 1336 unsigned int err_mask;
1e999736 1337 struct ata_taskfile tf;
c728a914 1338 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1339
1340 ata_tf_init(dev, &tf);
1341
c728a914 1342 /* always clear all address registers */
1e999736 1343 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1344
c728a914
TH
1345 if (lba48) {
1346 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1347 tf.flags |= ATA_TFLAG_LBA48;
1348 } else
1349 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1350
1e999736 1351 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1352 tf.device |= ATA_LBA;
1353
2b789108 1354 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1355 if (err_mask) {
1356 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1357 "max address (err_mask=0x%x)\n", err_mask);
1358 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1359 return -EACCES;
1360 return -EIO;
1361 }
1e999736 1362
c728a914
TH
1363 if (lba48)
1364 *max_sectors = ata_tf_to_lba48(&tf);
1365 else
1366 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1367 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1368 (*max_sectors)--;
c728a914 1369 return 0;
1e999736
AC
1370}
1371
1372/**
c728a914
TH
1373 * ata_set_max_sectors - Set max sectors
1374 * @dev: target device
6b38d1d1 1375 * @new_sectors: new max sectors value to set for the device
1e999736 1376 *
c728a914
TH
1377 * Set max sectors of @dev to @new_sectors.
1378 *
1379 * RETURNS:
1380 * 0 on success, -EACCES if command is aborted or denied (due to
1381 * previous non-volatile SET_MAX) by the drive. -EIO on other
1382 * errors.
1e999736 1383 */
05027adc 1384static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1385{
c728a914 1386 unsigned int err_mask;
1e999736 1387 struct ata_taskfile tf;
c728a914 1388 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1389
1390 new_sectors--;
1391
1392 ata_tf_init(dev, &tf);
1393
1e999736 1394 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1395
1396 if (lba48) {
1397 tf.command = ATA_CMD_SET_MAX_EXT;
1398 tf.flags |= ATA_TFLAG_LBA48;
1399
1400 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1401 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1402 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1403 } else {
c728a914
TH
1404 tf.command = ATA_CMD_SET_MAX;
1405
1e582ba4
TH
1406 tf.device |= (new_sectors >> 24) & 0xf;
1407 }
1408
1e999736 1409 tf.protocol |= ATA_PROT_NODATA;
c728a914 1410 tf.device |= ATA_LBA;
1e999736
AC
1411
1412 tf.lbal = (new_sectors >> 0) & 0xff;
1413 tf.lbam = (new_sectors >> 8) & 0xff;
1414 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1415
2b789108 1416 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1417 if (err_mask) {
1418 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1419 "max address (err_mask=0x%x)\n", err_mask);
1420 if (err_mask == AC_ERR_DEV &&
1421 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1422 return -EACCES;
1423 return -EIO;
1424 }
1425
c728a914 1426 return 0;
1e999736
AC
1427}
1428
1429/**
1430 * ata_hpa_resize - Resize a device with an HPA set
1431 * @dev: Device to resize
1432 *
1433 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1434 * it if required to the full size of the media. The caller must check
1435 * the drive has the HPA feature set enabled.
05027adc
TH
1436 *
1437 * RETURNS:
1438 * 0 on success, -errno on failure.
1e999736 1439 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it?  Only ATA disks with LBA, an enabled
	 * HPA and no known-broken HPA handling are considered. */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  Either no HPA is set or unlocking is not
	 * requested via the ata_ignore_hpa module state; at most print
	 * an informational message. */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1524
0baab86b
EF
1525/**
1526 * ata_noop_dev_select - Select device 0/1 on ATA bus
1527 * @ap: ATA channel to manipulate
1528 * @device: ATA device (numbered from zero) to select
1529 *
1530 * This function performs no actual function.
1531 *
1532 * May be used as the dev_select() entry in ata_port_operations.
1533 *
1534 * LOCKING:
1535 * caller.
1536 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: placeholder dev_select for hosts that
	 * need no explicit device selection */
}
1540
0baab86b 1541
1da177e4
LT
1542/**
1543 * ata_std_dev_select - Select device 0/1 on ATA bus
1544 * @ap: ATA channel to manipulate
1545 * @device: ATA device (numbered from zero) to select
1546 *
1547 * Use the method defined in the ATA specification to
1548 * make either device 0, or device 1, active on the
0baab86b
EF
1549 * ATA channel. Works with both PIO and MMIO.
1550 *
1551 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1552 *
1553 * LOCKING:
1554 * caller.
1555 */
1556
2dcb407e 1557void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1558{
1559 u8 tmp;
1560
1561 if (device == 0)
1562 tmp = ATA_DEVICE_OBS;
1563 else
1564 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1565
0d5ff566 1566 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1567 ata_pause(ap); /* needed; also flushes, for mmio */
1568}
1569
1570/**
1571 * ata_dev_select - Select device 0/1 on ATA bus
1572 * @ap: ATA channel to manipulate
1573 * @device: ATA device (numbered from zero) to select
1574 * @wait: non-zero to wait for Status register BSY bit to clear
1575 * @can_sleep: non-zero if context allows sleeping
1576 *
1577 * Use the method defined in the ATA specification to
1578 * make either device 0, or device 1, active on the
1579 * ATA channel.
1580 *
1581 * This is a high-level version of ata_std_dev_select(),
1582 * which additionally provides the services of inserting
1583 * the proper pauses and status polling, where needed.
1584 *
1585 * LOCKING:
1586 * caller.
1587 */
1588
1589void ata_dev_select(struct ata_port *ap, unsigned int device,
1590 unsigned int wait, unsigned int can_sleep)
1591{
88574551 1592 if (ata_msg_probe(ap))
44877b4e
TH
1593 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1594 "device %u, wait %u\n", device, wait);
1da177e4
LT
1595
1596 if (wait)
1597 ata_wait_idle(ap);
1598
1599 ap->ops->dev_select(ap, device);
1600
1601 if (wait) {
9af5c9c9 1602 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1603 msleep(150);
1604 ata_wait_idle(ap);
1605 }
1606}
1607
1608/**
1609 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1610 * @id: IDENTIFY DEVICE page to dump
1da177e4 1611 *
0bd3300a
TH
1612 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1613 * page.
1da177e4
LT
1614 *
1615 * LOCKING:
1616 * caller.
1617 */
1618
static inline void ata_dump_id(const u16 *id)
{
	/* dump selected IDENTIFY DEVICE words for debugging; word
	 * meanings follow the ATA IDENTIFY layout (e.g. 63/64 DMA/PIO
	 * modes, 80/81 version, 88 UDMA) — see the ATA spec */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1646
cb95d562
TH
1647/**
1648 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1649 * @id: IDENTIFY data to compute xfer mask from
1650 *
1651 * Compute the xfermask for this device. This is not as trivial
1652 * as it seems if we must consider early devices correctly.
1653 *
1654 * FIXME: pre IDE drive timing (do we care ?).
1655 *
1656 * LOCKING:
1657 * None.
1658 *
1659 * RETURNS:
1660 * Computed xfermask
1661 */
7dc951ae 1662unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1663{
7dc951ae 1664 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1665
1666 /* Usual case. Word 53 indicates word 64 is valid */
1667 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1668 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1669 pio_mask <<= 3;
1670 pio_mask |= 0x7;
1671 } else {
1672 /* If word 64 isn't valid then Word 51 high byte holds
1673 * the PIO timing number for the maximum. Turn it into
1674 * a mask.
1675 */
7a0f1c8a 1676 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1677 if (mode < 5) /* Valid PIO range */
2dcb407e 1678 pio_mask = (2 << mode) - 1;
46767aeb
AC
1679 else
1680 pio_mask = 1;
cb95d562
TH
1681
1682 /* But wait.. there's more. Design your standards by
1683 * committee and you too can get a free iordy field to
1684 * process. However its the speeds not the modes that
1685 * are supported... Note drivers using the timing API
1686 * will get this right anyway
1687 */
1688 }
1689
1690 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1691
b352e57d
AC
1692 if (ata_id_is_cfa(id)) {
1693 /*
1694 * Process compact flash extended modes
1695 */
1696 int pio = id[163] & 0x7;
1697 int dma = (id[163] >> 3) & 7;
1698
1699 if (pio)
1700 pio_mask |= (1 << 5);
1701 if (pio > 1)
1702 pio_mask |= (1 << 6);
1703 if (dma)
1704 mwdma_mask |= (1 << 3);
1705 if (dma > 1)
1706 mwdma_mask |= (1 << 4);
1707 }
1708
fb21f0d0
TH
1709 udma_mask = 0;
1710 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1711 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1712
1713 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1714}
1715
86e45b6b 1716/**
442eacc3 1717 * ata_pio_queue_task - Queue port_task
86e45b6b 1718 * @ap: The ata_port to queue port_task for
e2a7f77a 1719 * @fn: workqueue function to be scheduled
65f27f38 1720 * @data: data for @fn to use
e2a7f77a 1721 * @delay: delay time for workqueue function
86e45b6b
TH
1722 *
1723 * Schedule @fn(@data) for execution after @delay jiffies using
1724 * port_task. There is one port_task per port and it's the
1725 * user(low level driver)'s responsibility to make sure that only
1726 * one task is active at any given time.
1727 *
1728 * libata core layer takes care of synchronization between
442eacc3 1729 * port_task and EH. ata_pio_queue_task() may be ignored for EH
86e45b6b
TH
1730 * synchronization.
1731 *
1732 * LOCKING:
1733 * Inherited from caller.
1734 */
442eacc3
JG
1735static void ata_pio_queue_task(struct ata_port *ap, void *data,
1736 unsigned long delay)
86e45b6b 1737{
65f27f38 1738 ap->port_task_data = data;
86e45b6b 1739
45a66c1c
ON
1740 /* may fail if ata_port_flush_task() in progress */
1741 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1742}
1743
1744/**
1745 * ata_port_flush_task - Flush port_task
1746 * @ap: The ata_port to flush port_task for
1747 *
 * After this function completes, port_task is guaranteed not to
1749 * be running or scheduled.
1750 *
1751 * LOCKING:
1752 * Kernel thread context (may sleep)
1753 */
1754void ata_port_flush_task(struct ata_port *ap)
1755{
86e45b6b
TH
1756 DPRINTK("ENTER\n");
1757
45a66c1c 1758 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1759
0dd4b21f 1760 if (ata_msg_ctl(ap))
7f5e4e8d 1761 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
86e45b6b
TH
1762}
1763
7102d230 1764static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1765{
77853bf2 1766 struct completion *waiting = qc->private_data;
a2a7a662 1767
a2a7a662 1768 complete(waiting);
a2a7a662
TH
1769}
1770
1771/**
2432697b 1772 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1773 * @dev: Device to which the command is sent
1774 * @tf: Taskfile registers for the command and the result
d69cf37d 1775 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
5c1ad8b3 1777 * @sgl: sg list for the data buffer of the command
2432697b 1778 * @n_elem: Number of sg entries
2b789108 1779 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1780 *
1781 * Executes libata internal command with timeout. @tf contains
1782 * command on entry and result on return. Timeout and error
1783 * conditions are reported via return value. No recovery action
1784 * is taken after a command times out. It's caller's duty to
1785 * clean up after timeout.
1786 *
1787 * LOCKING:
1788 * None. Should be called with kernel context, might sleep.
551e8889
TH
1789 *
1790 * RETURNS:
1791 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1792 */
2432697b
TH
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the link/port command state so it can be restored after
	 * the internal command preempts whatever was in flight */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total byte count across the sg list */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		/* the wait timed out */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the generic AC_ERR_OTHER when a more specific
		 * cause is already recorded */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	/* free the qc and restore the preempted command state */
	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1952
2432697b 1953/**
33480a0e 1954 * ata_exec_internal - execute libata internal command
2432697b
TH
1955 * @dev: Device to which the command is sent
1956 * @tf: Taskfile registers for the command and the result
1957 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
1959 * @buf: Data buffer of the command
1960 * @buflen: Length of data buffer
2b789108 1961 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1962 *
1963 * Wrapper around ata_exec_internal_sg() which takes simple
1964 * buffer instead of sg list.
1965 *
1966 * LOCKING:
1967 * None. Should be called with kernel context, might sleep.
1968 *
1969 * RETURNS:
1970 * Zero on success, AC_ERR_* mask on failure
1971 */
1972unsigned ata_exec_internal(struct ata_device *dev,
1973 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1974 int dma_dir, void *buf, unsigned int buflen,
1975 unsigned long timeout)
2432697b 1976{
33480a0e
TH
1977 struct scatterlist *psg = NULL, sg;
1978 unsigned int n_elem = 0;
2432697b 1979
33480a0e
TH
1980 if (dma_dir != DMA_NONE) {
1981 WARN_ON(!buf);
1982 sg_init_one(&sg, buf, buflen);
1983 psg = &sg;
1984 n_elem++;
1985 }
2432697b 1986
2b789108
TH
1987 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1988 timeout);
2432697b
TH
1989}
1990
977e6b9f
TH
1991/**
1992 * ata_do_simple_cmd - execute simple internal command
1993 * @dev: Device to which the command is sent
1994 * @cmd: Opcode to execute
1995 *
1996 * Execute a 'simple' command, that only consists of the opcode
1997 * 'cmd' itself, without filling any other registers
1998 *
1999 * LOCKING:
2000 * Kernel thread context (may sleep).
2001 *
2002 * RETURNS:
2003 * Zero on success, AC_ERR_* mask on failure
e58eb583 2004 */
77b08fb5 2005unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
2006{
2007 struct ata_taskfile tf;
e58eb583
TH
2008
2009 ata_tf_init(dev, &tf);
2010
2011 tf.command = cmd;
2012 tf.flags |= ATA_TFLAG_DEVICE;
2013 tf.protocol = ATA_PROT_NODATA;
2014
2b789108 2015 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
2016}
2017
1bc4ccff
AC
2018/**
2019 * ata_pio_need_iordy - check if iordy needed
2020 * @adev: ATA device
2021 *
2022 * Check if the current speed of the device requires IORDY. Used
2023 * by various controllers for chip configuration.
2024 */
a617c09f 2025
1bc4ccff
AC
2026unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2027{
432729f0
AC
2028 /* Controller doesn't support IORDY. Probably a pointless check
2029 as the caller should know this */
9af5c9c9 2030 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 2031 return 0;
432729f0
AC
2032 /* PIO3 and higher it is mandatory */
2033 if (adev->pio_mode > XFER_PIO_2)
2034 return 1;
2035 /* We turn it on when possible */
2036 if (ata_id_has_iordy(adev->id))
1bc4ccff 2037 return 1;
432729f0
AC
2038 return 0;
2039}
2e9edbf8 2040
432729f0
AC
2041/**
2042 * ata_pio_mask_no_iordy - Return the non IORDY mask
2043 * @adev: ATA device
2044 *
2045 * Compute the highest mode possible if we are not using iordy. Return
2046 * -1 if no iordy mode is available.
2047 */
a617c09f 2048
432729f0
AC
2049static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2050{
1bc4ccff 2051 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 2052 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 2053 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
2054 /* Is the speed faster than the drive allows non IORDY ? */
2055 if (pio) {
2056 /* This is cycle times not frequency - watch the logic! */
2057 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
2058 return 3 << ATA_SHIFT_PIO;
2059 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
2060 }
2061 }
432729f0 2062 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
2063}
2064
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	/* may_fallback: one shot at the other IDENTIFY flavor if the
	 * device aborts; tried_spinup: SETFEATURES_SPINUP issued once. */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY flavor matching the (possibly guessed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the ID data must agree with the class we used */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* 0x37c8/0x738c in word 2 signal power-up-in-standby state;
	 * NOTE(review): these magic values appear to come from the ATA
	 * power-up-in-standby feature set -- confirm against the spec. */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2246
3373efd8 2247static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2248{
9af5c9c9
TH
2249 struct ata_port *ap = dev->link->ap;
2250 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2251}
2252
/* Configure NCQ for @dev and format a human-readable summary into
 * @desc (at most @desc_sz bytes) for the probe printk.  Sets
 * ATA_DFLAG_NCQ on the device when both device and host support NCQ
 * and no NONCQ horkage applies.
 */
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	/* hdepth: host-side queue depth; ddepth: device-advertised depth */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		/* device doesn't do NCQ at all: empty description */
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		/* blacklisted: supported but deliberately unused */
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		/* reserve one tag; cap at what the SCSI host can queue */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
2277
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 set => low byte is the current multi count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters beyond 28-bit LBA */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf,	modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* DMADIR: either forced by module parameter or advertised */
		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __func__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2572
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.  Suitable as a ->cable_detect callback; always
 *	reports ATA_CBL_PATA40.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2585
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.  Suitable as a ->cable_detect callback; always
 *	reports ATA_CBL_PATA80.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2598
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *	Always reports ATA_CBL_PATA_UNK.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2610
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.  Always reports ATA_CBL_PATA_IGN.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2622
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.  Always
 *	reports ATA_CBL_SATA.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2634
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device retry budget */
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* stash the classes reported by the reset, then clear them so
	 * ata_dev_read_id() can confirm or correct each one */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* "dev" here is the device whose ata_dev_read_id/configure or
	 * set_mode attempt failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough - shares the slow-down handling with -EIO */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2783
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.  This only clears
 *	ATA_FLAG_DISABLED; it does not touch the per-device state.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2799
3be680b7
TH
2800/**
2801 * sata_print_link_status - Print SATA link status
936fd732 2802 * @link: SATA link to printk link status about
3be680b7
TH
2803 *
2804 * This function prints link speed and status of a SATA link.
2805 *
2806 * LOCKING:
2807 * None.
2808 */
936fd732 2809void sata_print_link_status(struct ata_link *link)
3be680b7 2810{
6d5f9732 2811 u32 sstatus, scontrol, tmp;
3be680b7 2812
936fd732 2813 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2814 return;
936fd732 2815 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2816
936fd732 2817 if (ata_link_online(link)) {
3be680b7 2818 tmp = (sstatus >> 4) & 0xf;
936fd732 2819 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2820 "SATA link up %s (SStatus %X SControl %X)\n",
2821 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2822 } else {
936fd732 2823 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2824 "SATA link down (SStatus %X SControl %X)\n",
2825 sstatus, scontrol);
3be680b7
TH
2826 }
2827}
2828
ebdfca6e
AC
2829/**
2830 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2831 * @adev: device
2832 *
2833 * Obtain the other device on the same cable, or if none is
2834 * present NULL is returned
2835 */
2e9edbf8 2836
3373efd8 2837struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2838{
9af5c9c9
TH
2839 struct ata_link *link = adev->link;
2840 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2841 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2842 return NULL;
2843 return pair;
2844}
2845
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.  Both device slots are marked ATA_DEV_NONE
 *	and ATA_FLAG_DISABLED is set on the port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2865
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;	/* SStatus bits 7:4 = speed */
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;		/* already at the lowest speed */

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2924
/* Compute the SControl speed-limit field for @link and update
 * *@scontrol in place.  Returns nonzero when the written value
 * differs from the current one, i.e. a reset is needed to apply it.
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;		/* no limit: leave SPD field at 0 */
	else
		target = fls(limit);	/* highest allowed speed */

	spd = (*scontrol >> 4) & 0xf;	/* SControl bits 7:4 = SPD limit */
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}
2949
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* SCR unreadable: conservatively report that a reset is needed */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
2974
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() also rewrites scontrol's SPD field */
	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
3004
452503f9
AC
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Column order mirrors the fields quantized in ata_timing_quantize():
 * mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 * -- presumed to match struct ata_timing's field order; confirm
 * against <linux/libata.h>.  A value of 0 means "not applicable". */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	{ 0xFF }	/* sentinel terminating the table */
};
3048
/* ENOUGH() rounds @v up to a whole number of @unit periods; EZ()
 * additionally maps an unspecified timing (0) to 0 cycles instead of 1.
 */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)

/* Quantize timing @t (in nanoseconds) into bus clock counts in @q,
 * rounding up.  @T is the clock period used for the PIO/MWDMA fields
 * and @UT the one for the UDMA field (units per ns*1000 scaling below;
 * callers derive them from the bus clock).  @q may alias @t.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
3063
3064void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3065 struct ata_timing *m, unsigned int what)
3066{
3067 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3068 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3069 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3070 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3071 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3072 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3073 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3074 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3075}
3076
6357357c 3077const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3078{
70cd071e
TH
3079 const struct ata_timing *t = ata_timing;
3080
3081 while (xfer_mode > t->mode)
3082 t++;
452503f9 3083
70cd071e
TH
3084 if (xfer_mode == t->mode)
3085 return t;
3086 return NULL;
452503f9
AC
3087}
3088
/* Compute the effective bus timing for @adev running at mode @speed,
 * merging in the drive's own EIDE minimum-cycle claims, quantizing to
 * bus clocks (@T for PIO/MWDMA, @UT for UDMA — see
 * ata_timing_quantize()) and clamping DMA timings so they are never
 * faster than the device's PIO timing.  Result in @t.  Returns 0 on
 * success, -EINVAL if @speed has no timing entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* only the cycle fields can be overridden by the drive */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		/* recursive call terminates: pio_mode <= XFER_PIO_6 */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3159
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base (slowest) XFER_* mode of this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk timings of this type from slowest to fastest, keeping
	 * the last mode whose required cycle still fits within @cycle
	 */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3210
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is OR'd into the selector; strip it off */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to drop below PIO or to "change" to the same mask */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3299
/* Issue SET FEATURES - XFER MODE to @dev for the already-chosen
 * dev->xfer_mode/xfer_shift, then revalidate the device.  A device
 * error (AC_ERR_DEV) from the command is tolerated for several classes
 * of known-quirky hardware as long as the device ends up configured
 * correctly.  Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* track PIO-mode operation via the device flag */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* anything other than a plain device error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		ign_dev_err = 1;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		ign_dev_err = 1;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3366
1da177e4 3367/**
04351821 3368 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3369 * @link: link on which timings will be programmed
1967b7ff 3370 * @r_failed_dev: out parameter for failed device
1da177e4 3371 *
04351821
A
3372 * Standard implementation of the function used to tune and set
3373 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3374 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3375 * returned in @r_failed_dev.
780a87f7 3376 *
1da177e4 3377 * LOCKING:
0cba632b 3378 * PCI/etc. bus probe sem.
e82cbdb9
TH
3379 *
3380 * RETURNS:
3381 * 0 on success, negative errno otherwise
1da177e4 3382 */
04351821 3383
0260731f 3384int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3385{
0260731f 3386 struct ata_port *ap = link->ap;
e8e0619f 3387 struct ata_device *dev;
f58229f8 3388 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3389
a6d5a51c 3390 /* step 1: calculate xfer_mask */
f58229f8 3391 ata_link_for_each_dev(dev, link) {
7dc951ae 3392 unsigned long pio_mask, dma_mask;
b3a70601 3393 unsigned int mode_mask;
a6d5a51c 3394
e1211e3f 3395 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3396 continue;
3397
b3a70601
AC
3398 mode_mask = ATA_DMA_MASK_ATA;
3399 if (dev->class == ATA_DEV_ATAPI)
3400 mode_mask = ATA_DMA_MASK_ATAPI;
3401 else if (ata_id_is_cfa(dev->id))
3402 mode_mask = ATA_DMA_MASK_CFA;
3403
3373efd8 3404 ata_dev_xfermask(dev);
33267325 3405 ata_force_xfermask(dev);
1da177e4 3406
acf356b1
TH
3407 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3408 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3409
3410 if (libata_dma_mask & mode_mask)
3411 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3412 else
3413 dma_mask = 0;
3414
acf356b1
TH
3415 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3416 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3417
4f65977d 3418 found = 1;
70cd071e 3419 if (dev->dma_mode != 0xff)
5444a6f4 3420 used_dma = 1;
a6d5a51c 3421 }
4f65977d 3422 if (!found)
e82cbdb9 3423 goto out;
a6d5a51c
TH
3424
3425 /* step 2: always set host PIO timings */
f58229f8 3426 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3427 if (!ata_dev_enabled(dev))
3428 continue;
3429
70cd071e 3430 if (dev->pio_mode == 0xff) {
f15a1daf 3431 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3432 rc = -EINVAL;
e82cbdb9 3433 goto out;
e8e0619f
TH
3434 }
3435
3436 dev->xfer_mode = dev->pio_mode;
3437 dev->xfer_shift = ATA_SHIFT_PIO;
3438 if (ap->ops->set_piomode)
3439 ap->ops->set_piomode(ap, dev);
3440 }
1da177e4 3441
a6d5a51c 3442 /* step 3: set host DMA timings */
f58229f8 3443 ata_link_for_each_dev(dev, link) {
70cd071e 3444 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3445 continue;
3446
3447 dev->xfer_mode = dev->dma_mode;
3448 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3449 if (ap->ops->set_dmamode)
3450 ap->ops->set_dmamode(ap, dev);
3451 }
1da177e4
LT
3452
3453 /* step 4: update devices' xfer mode */
f58229f8 3454 ata_link_for_each_dev(dev, link) {
18d90deb 3455 /* don't update suspended devices' xfer mode */
9666f400 3456 if (!ata_dev_enabled(dev))
83206a29
TH
3457 continue;
3458
3373efd8 3459 rc = ata_dev_set_mode(dev);
5bbc53f4 3460 if (rc)
e82cbdb9 3461 goto out;
83206a29 3462 }
1da177e4 3463
e8e0619f
TH
3464 /* Record simplex status. If we selected DMA then the other
3465 * host channels are not permitted to do so.
5444a6f4 3466 */
cca3974e 3467 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3468 ap->host->simplex_claimed = ap;
5444a6f4 3469
e82cbdb9
TH
3470 out:
3471 if (rc)
3472 *r_failed_dev = dev;
3473 return rc;
1da177e4
LT
3474}
3475
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3495
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies): warn once it elapses
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* 0xff status means nothing is responding on the bus; stop
	 * polling in that case
	 */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the "impatience" period: warn but keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3550
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	/* never wait past the caller's deadline for the 0xff condition */
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3605
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* 0xff on an offline link means no device is attached */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if the wait drags on (>5s elapsed and >3s
		 * of deadline remaining)
		 */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3647
/* Post-softreset settling: wait for the devices found by ata_devchk()
 * to come out of BSY and re-run device selection.  Returns 0 when all
 * present devices became ready, -ENODEV if one went missing, or another
 * negative errno on hard failure.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			/* -ENODEV is remembered but not fatal here */
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3707
/* Pulse SRST in the device control register, then wait for the bus to
 * settle and hand off to ata_bus_post_reset().  Returns 0 on success,
 * -ENODEV for an empty bus, other negative errno on failure.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3734
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		/* -ENODEV (empty bus) is handled by classification below */
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3822
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @deadline and now+timeout */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field (SStatus bits 3:0) matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 is only accepted once the deadline passes */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3891
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep the SPD limit field (bits 7:4), clear DET and set the
	 * IPM field to 3 — presumably disabling partial/slumber power
	 * states per the SATA SControl layout; confirm against spec
	 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3927
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: controllers that need hardreset to
	 * resume force it here
	 */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			/* not ready in time: escalate to hardreset
			 * rather than failing prereset
			 */
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3990
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, skip the reset entirely */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81 indicates device 1 failed diagnostics; skip it */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
4050
4051/**
cc0680a5
TH
4052 * sata_link_hardreset - reset link via SATA phy reset
4053 * @link: link to reset
b6103f6d 4054 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 4055 * @deadline: deadline jiffies for the operation
c2bd5804 4056 *
cc0680a5 4057 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
4058 *
4059 * LOCKING:
4060 * Kernel thread context (may sleep)
4061 *
4062 * RETURNS:
4063 * 0 on success, -errno otherwise.
4064 */
cc0680a5 4065int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 4066 unsigned long deadline)
c2bd5804 4067{
852ee16a 4068 u32 scontrol;
81952c54 4069 int rc;
852ee16a 4070
c2bd5804
TH
4071 DPRINTK("ENTER\n");
4072
936fd732 4073 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
4074 /* SATA spec says nothing about how to reconfigure
4075 * spd. To be on the safe side, turn off phy during
4076 * reconfiguration. This works for at least ICH7 AHCI
4077 * and Sil3124.
4078 */
936fd732 4079 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4080 goto out;
81952c54 4081
a34b6fc0 4082 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 4083
936fd732 4084 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 4085 goto out;
1c3fae4d 4086
936fd732 4087 sata_set_spd(link);
1c3fae4d
TH
4088 }
4089
4090 /* issue phy wake/reset */
936fd732 4091 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4092 goto out;
81952c54 4093
852ee16a 4094 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 4095
936fd732 4096 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 4097 goto out;
c2bd5804 4098
1c3fae4d 4099 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
4100 * 10.4.2 says at least 1 ms.
4101 */
4102 msleep(1);
4103
936fd732
TH
4104 /* bring link back */
4105 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
4106 out:
4107 DPRINTK("EXIT, rc=%d\n", rc);
4108 return rc;
4109}
4110
4111/**
4112 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 4113 * @link: link to reset
b6103f6d 4114 * @class: resulting class of attached device
d4b2bab4 4115 * @deadline: deadline jiffies for the operation
b6103f6d
TH
4116 *
4117 * SATA phy-reset host port using DET bits of SControl register,
4118 * wait for !BSY and classify the attached device.
4119 *
4120 * LOCKING:
4121 * Kernel thread context (may sleep)
4122 *
4123 * RETURNS:
4124 * 0 on success, -errno otherwise.
4125 */
cc0680a5 4126int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 4127 unsigned long deadline)
b6103f6d 4128{
cc0680a5 4129 struct ata_port *ap = link->ap;
936fd732 4130 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
4131 int rc;
4132
4133 DPRINTK("ENTER\n");
4134
4135 /* do hardreset */
cc0680a5 4136 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 4137 if (rc) {
cc0680a5 4138 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
4139 "COMRESET failed (errno=%d)\n", rc);
4140 return rc;
4141 }
c2bd5804 4142
c2bd5804 4143 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 4144 if (ata_link_offline(link)) {
c2bd5804
TH
4145 *class = ATA_DEV_NONE;
4146 DPRINTK("EXIT, link offline\n");
4147 return 0;
4148 }
4149
88ff6eaf
TH
4150 /* wait a while before checking status */
4151 ata_wait_after_reset(ap, deadline);
34fee227 4152
633273a3
TH
4153 /* If PMP is supported, we have to do follow-up SRST. Note
4154 * that some PMPs don't send D2H Reg FIS after hardreset at
4155 * all if the first port is empty. Wait for it just for a
4156 * second and request follow-up SRST.
4157 */
4158 if (ap->flags & ATA_FLAG_PMP) {
4159 ata_wait_ready(ap, jiffies + HZ);
4160 return -EAGAIN;
4161 }
4162
d4b2bab4 4163 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
4164 /* link occupied, -ENODEV too is an error */
4165 if (rc) {
cc0680a5 4166 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
4167 "COMRESET failed (errno=%d)\n", rc);
4168 return rc;
c2bd5804
TH
4169 }
4170
3a39746a
TH
4171 ap->ops->dev_select(ap, 0); /* probably unnecessary */
4172
3f19859e 4173 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
4174
4175 DPRINTK("EXIT, class=%u\n", *class);
4176 return 0;
4177}
4178
4179/**
4180 * ata_std_postreset - standard postreset callback
cc0680a5 4181 * @link: the target ata_link
c2bd5804
TH
4182 * @classes: classes of attached devices
4183 *
4184 * This function is invoked after a successful reset. Note that
4185 * the device might have been reset more than once using
4186 * different reset methods before postreset is invoked.
c2bd5804 4187 *
c2bd5804
TH
4188 * LOCKING:
4189 * Kernel thread context (may sleep)
4190 */
cc0680a5 4191void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 4192{
cc0680a5 4193 struct ata_port *ap = link->ap;
dc2b3515
TH
4194 u32 serror;
4195
c2bd5804
TH
4196 DPRINTK("ENTER\n");
4197
c2bd5804 4198 /* print link status */
936fd732 4199 sata_print_link_status(link);
c2bd5804 4200
dc2b3515 4201 /* clear SError */
936fd732
TH
4202 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4203 sata_scr_write(link, SCR_ERROR, serror);
f7fe7ad4 4204 link->eh_info.serror = 0;
dc2b3515 4205
c2bd5804
TH
4206 /* is double-select really necessary? */
4207 if (classes[0] != ATA_DEV_NONE)
4208 ap->ops->dev_select(ap, 1);
4209 if (classes[1] != ATA_DEV_NONE)
4210 ap->ops->dev_select(ap, 0);
4211
3a39746a
TH
4212 /* bail out if no device is present */
4213 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4214 DPRINTK("EXIT, no device\n");
4215 return;
4216 }
4217
4218 /* set up device control */
0d5ff566
TH
4219 if (ap->ioaddr.ctl_addr)
4220 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
4221
4222 DPRINTK("EXIT\n");
4223}
4224
623a3128
TH
4225/**
4226 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4227 * @dev: device to compare against
4228 * @new_class: class of the new device
4229 * @new_id: IDENTIFY page of the new device
4230 *
4231 * Compare @new_class and @new_id against @dev and determine
4232 * whether @dev is the device indicated by @new_class and
4233 * @new_id.
4234 *
4235 * LOCKING:
4236 * None.
4237 *
4238 * RETURNS:
4239 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4240 */
3373efd8
TH
4241static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4242 const u16 *new_id)
623a3128
TH
4243{
4244 const u16 *old_id = dev->id;
a0cf733b
TH
4245 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4246 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4247
4248 if (dev->class != new_class) {
f15a1daf
TH
4249 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4250 dev->class, new_class);
623a3128
TH
4251 return 0;
4252 }
4253
a0cf733b
TH
4254 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4255 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4256 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4257 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4258
4259 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4260 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4261 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4262 return 0;
4263 }
4264
4265 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4266 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4267 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4268 return 0;
4269 }
4270
623a3128
TH
4271 return 1;
4272}
4273
4274/**
fe30911b 4275 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4276 * @dev: target ATA device
bff04647 4277 * @readid_flags: read ID flags
623a3128
TH
4278 *
4279 * Re-read IDENTIFY page and make sure @dev is still attached to
4280 * the port.
4281 *
4282 * LOCKING:
4283 * Kernel thread context (may sleep)
4284 *
4285 * RETURNS:
4286 * 0 on success, negative errno otherwise
4287 */
fe30911b 4288int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4289{
5eb45c02 4290 unsigned int class = dev->class;
9af5c9c9 4291 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4292 int rc;
4293
fe635c7e 4294 /* read ID data */
bff04647 4295 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4296 if (rc)
fe30911b 4297 return rc;
623a3128
TH
4298
4299 /* is the device still there? */
fe30911b
TH
4300 if (!ata_dev_same_device(dev, class, id))
4301 return -ENODEV;
623a3128 4302
fe635c7e 4303 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4304 return 0;
4305}
4306
4307/**
4308 * ata_dev_revalidate - Revalidate ATA device
4309 * @dev: device to revalidate
422c9daa 4310 * @new_class: new class code
fe30911b
TH
4311 * @readid_flags: read ID flags
4312 *
4313 * Re-read IDENTIFY page, make sure @dev is still attached to the
4314 * port and reconfigure it according to the new IDENTIFY page.
4315 *
4316 * LOCKING:
4317 * Kernel thread context (may sleep)
4318 *
4319 * RETURNS:
4320 * 0 on success, negative errno otherwise
4321 */
422c9daa
TH
4322int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4323 unsigned int readid_flags)
fe30911b 4324{
6ddcd3b0 4325 u64 n_sectors = dev->n_sectors;
fe30911b
TH
4326 int rc;
4327
4328 if (!ata_dev_enabled(dev))
4329 return -ENODEV;
4330
422c9daa
TH
4331 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4332 if (ata_class_enabled(new_class) &&
4333 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4334 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4335 dev->class, new_class);
4336 rc = -ENODEV;
4337 goto fail;
4338 }
4339
fe30911b
TH
4340 /* re-read ID */
4341 rc = ata_dev_reread_id(dev, readid_flags);
4342 if (rc)
4343 goto fail;
623a3128
TH
4344
4345 /* configure device according to the new ID */
efdaedc4 4346 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4347 if (rc)
4348 goto fail;
4349
4350 /* verify n_sectors hasn't changed */
b54eebd6
TH
4351 if (dev->class == ATA_DEV_ATA && n_sectors &&
4352 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4353 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4354 "%llu != %llu\n",
4355 (unsigned long long)n_sectors,
4356 (unsigned long long)dev->n_sectors);
8270bec4
TH
4357
4358 /* restore original n_sectors */
4359 dev->n_sectors = n_sectors;
4360
6ddcd3b0
TH
4361 rc = -ENODEV;
4362 goto fail;
4363 }
4364
4365 return 0;
623a3128
TH
4366
4367 fail:
f15a1daf 4368 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4369 return rc;
4370}
4371
6919a0a6
AC
4372struct ata_blacklist_entry {
4373 const char *model_num;
4374 const char *model_rev;
4375 unsigned long horkage;
4376};
4377
4378static const struct ata_blacklist_entry ata_device_blacklist [] = {
4379 /* Devices with DMA related problems under Linux */
4380 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4381 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4382 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4383 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4384 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4385 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4386 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4387 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4388 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4389 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4390 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4391 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4392 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4393 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4394 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4395 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4396 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4397 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4398 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4399 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4400 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4401 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4402 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4403 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4404 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4405 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4406 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4407 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4408 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4409 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4410 /* Odd clown on sil3726/4726 PMPs */
4411 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4412 ATA_HORKAGE_SKIP_PM },
6919a0a6 4413
18d6e9d5 4414 /* Weird ATAPI devices */
40a1d531 4415 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4416
6919a0a6
AC
4417 /* Devices we expect to fail diagnostics */
4418
4419 /* Devices where NCQ should be avoided */
4420 /* NCQ is slow */
2dcb407e 4421 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4422 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4423 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4424 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4425 /* NCQ is broken */
539cc7c7 4426 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4427 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4428 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4429 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4430
36e337d0
RH
4431 /* Blacklist entries taken from Silicon Image 3124/3132
4432 Windows driver .inf file - also several Linux problem reports */
4433 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4434 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4435 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4436
16c55b03
TH
4437 /* devices which puke on READ_NATIVE_MAX */
4438 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4439 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4440 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4441 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4442
93328e11
AC
4443 /* Devices which report 1 sector over size HPA */
4444 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4445 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4446 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4447
6bbfd53d
AC
4448 /* Devices which get the IVB wrong */
4449 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4450 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
4451 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4452 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4453 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4454
6919a0a6
AC
4455 /* End Marker */
4456 { }
1da177e4 4457};
2e9edbf8 4458
/* Compare @name against pattern @patt.  A single trailing @wildchar
 * (e.g. '*') in @patt matches any suffix; otherwise the full @name is
 * compared.  Returns 0 on match, non-zero otherwise (strncmp semantics).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	int len;

	if (wild && wild[1] == '\0') {
		/* trailing wildcard: compare only the prefix before it */
		len = wild - patt;
	} else {
		len = strlen(name);
		if (!len) {
			/* empty name matches only an empty pattern */
			if (!*patt)
				return 0;
			return -1;
		}
	}

	return strncmp(patt, name, len);
}
4481
75683fe7 4482static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4483{
8bfa79fc
TH
4484 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4485 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4486 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4487
8bfa79fc
TH
4488 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4489 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4490
6919a0a6 4491 while (ad->model_num) {
539cc7c7 4492 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4493 if (ad->model_rev == NULL)
4494 return ad->horkage;
539cc7c7 4495 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4496 return ad->horkage;
f4b15fef 4497 }
6919a0a6 4498 ad++;
f4b15fef 4499 }
1da177e4
LT
4500 return 0;
4501}
4502
6919a0a6
AC
4503static int ata_dma_blacklisted(const struct ata_device *dev)
4504{
4505 /* We don't support polling DMA.
4506 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4507 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4508 */
9af5c9c9 4509 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4510 (dev->flags & ATA_DFLAG_CDB_INTR))
4511 return 1;
75683fe7 4512 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4513}
4514
6bbfd53d
AC
4515/**
4516 * ata_is_40wire - check drive side detection
4517 * @dev: device
4518 *
4519 * Perform drive side detection decoding, allowing for device vendors
4520 * who can't follow the documentation.
4521 */
4522
4523static int ata_is_40wire(struct ata_device *dev)
4524{
4525 if (dev->horkage & ATA_HORKAGE_IVB)
4526 return ata_drive_40wire_relaxed(dev->id);
4527 return ata_drive_40wire(dev->id);
4528}
4529
a6d5a51c
TH
4530/**
4531 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4532 * @dev: Device to compute xfermask for
4533 *
acf356b1
TH
4534 * Compute supported xfermask of @dev and store it in
4535 * dev->*_mask. This function is responsible for applying all
4536 * known limits including host controller limits, device
4537 * blacklist, etc...
a6d5a51c
TH
4538 *
4539 * LOCKING:
4540 * None.
a6d5a51c 4541 */
3373efd8 4542static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4543{
9af5c9c9
TH
4544 struct ata_link *link = dev->link;
4545 struct ata_port *ap = link->ap;
cca3974e 4546 struct ata_host *host = ap->host;
a6d5a51c 4547 unsigned long xfer_mask;
1da177e4 4548
37deecb5 4549 /* controller modes available */
565083e1
TH
4550 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4551 ap->mwdma_mask, ap->udma_mask);
4552
8343f889 4553 /* drive modes available */
37deecb5
TH
4554 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4555 dev->mwdma_mask, dev->udma_mask);
4556 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4557
b352e57d
AC
4558 /*
4559 * CFA Advanced TrueIDE timings are not allowed on a shared
4560 * cable
4561 */
4562 if (ata_dev_pair(dev)) {
4563 /* No PIO5 or PIO6 */
4564 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4565 /* No MWDMA3 or MWDMA 4 */
4566 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4567 }
4568
37deecb5
TH
4569 if (ata_dma_blacklisted(dev)) {
4570 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4571 ata_dev_printk(dev, KERN_WARNING,
4572 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4573 }
a6d5a51c 4574
14d66ab7 4575 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4576 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4577 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4578 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4579 "other device, disabling DMA\n");
5444a6f4 4580 }
565083e1 4581
e424675f
JG
4582 if (ap->flags & ATA_FLAG_NO_IORDY)
4583 xfer_mask &= ata_pio_mask_no_iordy(dev);
4584
5444a6f4 4585 if (ap->ops->mode_filter)
a76b62ca 4586 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4587
8343f889
RH
4588 /* Apply cable rule here. Don't apply it early because when
4589 * we handle hot plug the cable type can itself change.
4590 * Check this last so that we know if the transfer rate was
4591 * solely limited by the cable.
4592 * Unknown or 80 wire cables reported host side are checked
4593 * drive side as well. Cases where we know a 40wire cable
4594 * is used safely for 80 are not checked here.
4595 */
4596 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4597 /* UDMA/44 or higher would be available */
2dcb407e 4598 if ((ap->cbl == ATA_CBL_PATA40) ||
6bbfd53d 4599 (ata_is_40wire(dev) &&
2dcb407e
JG
4600 (ap->cbl == ATA_CBL_PATA_UNK ||
4601 ap->cbl == ATA_CBL_PATA80))) {
4602 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4603 "limited to UDMA/33 due to 40-wire cable\n");
4604 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4605 }
4606
565083e1
TH
4607 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4608 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4609}
4610
1da177e4
LT
4611/**
4612 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4613 * @dev: Device to which command will be sent
4614 *
780a87f7
JG
4615 * Issue SET FEATURES - XFER MODE command to device @dev
4616 * on port @ap.
4617 *
1da177e4 4618 * LOCKING:
0cba632b 4619 * PCI/etc. bus probe sem.
83206a29
TH
4620 *
4621 * RETURNS:
4622 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4623 */
4624
3373efd8 4625static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4626{
a0123703 4627 struct ata_taskfile tf;
83206a29 4628 unsigned int err_mask;
1da177e4
LT
4629
4630 /* set up set-features taskfile */
4631 DPRINTK("set features - xfer mode\n");
4632
464cf177
TH
4633 /* Some controllers and ATAPI devices show flaky interrupt
4634 * behavior after setting xfer mode. Use polling instead.
4635 */
3373efd8 4636 ata_tf_init(dev, &tf);
a0123703
TH
4637 tf.command = ATA_CMD_SET_FEATURES;
4638 tf.feature = SETFEATURES_XFER;
464cf177 4639 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4640 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4641 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4642 if (ata_pio_need_iordy(dev))
4643 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4644 /* If the device has IORDY and the controller does not - turn it off */
4645 else if (ata_id_has_iordy(dev->id))
11b7becc 4646 tf.nsect = 0x01;
b9f8ab2d
AC
4647 else /* In the ancient relic department - skip all of this */
4648 return 0;
1da177e4 4649
2b789108 4650 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4651
4652 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4653 return err_mask;
4654}
9f45cbd3 4655/**
218f3d30 4656 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4657 * @dev: Device to which command will be sent
4658 * @enable: Whether to enable or disable the feature
218f3d30 4659 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4660 *
4661 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4662 * on port @ap with sector count
9f45cbd3
KCA
4663 *
4664 * LOCKING:
4665 * PCI/etc. bus probe sem.
4666 *
4667 * RETURNS:
4668 * 0 on success, AC_ERR_* mask otherwise.
4669 */
218f3d30
JG
4670static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4671 u8 feature)
9f45cbd3
KCA
4672{
4673 struct ata_taskfile tf;
4674 unsigned int err_mask;
4675
4676 /* set up set-features taskfile */
4677 DPRINTK("set features - SATA features\n");
4678
4679 ata_tf_init(dev, &tf);
4680 tf.command = ATA_CMD_SET_FEATURES;
4681 tf.feature = enable;
4682 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4683 tf.protocol = ATA_PROT_NODATA;
218f3d30 4684 tf.nsect = feature;
9f45cbd3 4685
2b789108 4686 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4687
83206a29
TH
4688 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4689 return err_mask;
1da177e4
LT
4690}
4691
8bf62ece
AL
4692/**
4693 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4694 * @dev: Device to which command will be sent
e2a7f77a
RD
4695 * @heads: Number of heads (taskfile parameter)
4696 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4697 *
4698 * LOCKING:
6aff8f1f
TH
4699 * Kernel thread context (may sleep)
4700 *
4701 * RETURNS:
4702 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4703 */
3373efd8
TH
4704static unsigned int ata_dev_init_params(struct ata_device *dev,
4705 u16 heads, u16 sectors)
8bf62ece 4706{
a0123703 4707 struct ata_taskfile tf;
6aff8f1f 4708 unsigned int err_mask;
8bf62ece
AL
4709
4710 /* Number of sectors per track 1-255. Number of heads 1-16 */
4711 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4712 return AC_ERR_INVALID;
8bf62ece
AL
4713
4714 /* set up init dev params taskfile */
4715 DPRINTK("init dev params \n");
4716
3373efd8 4717 ata_tf_init(dev, &tf);
a0123703
TH
4718 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4719 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4720 tf.protocol = ATA_PROT_NODATA;
4721 tf.nsect = sectors;
4722 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4723
2b789108 4724 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4725 /* A clean abort indicates an original or just out of spec drive
4726 and we should continue as we issue the setup based on the
4727 drive reported working geometry */
4728 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4729 err_mask = 0;
8bf62ece 4730
6aff8f1f
TH
4731 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4732 return err_mask;
8bf62ece
AL
4733}
4734
1da177e4 4735/**
0cba632b
JG
4736 * ata_sg_clean - Unmap DMA memory associated with command
4737 * @qc: Command containing DMA memory to be released
4738 *
4739 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4740 *
4741 * LOCKING:
cca3974e 4742 * spin_lock_irqsave(host lock)
1da177e4 4743 */
70e6ad0c 4744void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4745{
4746 struct ata_port *ap = qc->ap;
ff2aeb1e 4747 struct scatterlist *sg = qc->sg;
1da177e4
LT
4748 int dir = qc->dma_dir;
4749
a4631474 4750 WARN_ON(sg == NULL);
1da177e4 4751
dde20207 4752 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4753
dde20207
JB
4754 if (qc->n_elem)
4755 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
1da177e4
LT
4756
4757 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4758 qc->sg = NULL;
1da177e4
LT
4759}
4760
4761/**
4762 * ata_fill_sg - Fill PCI IDE PRD table
4763 * @qc: Metadata associated with taskfile to be transferred
4764 *
780a87f7
JG
4765 * Fill PCI IDE PRD (scatter-gather) table with segments
4766 * associated with the current disk command.
4767 *
1da177e4 4768 * LOCKING:
cca3974e 4769 * spin_lock_irqsave(host lock)
1da177e4
LT
4770 *
4771 */
4772static void ata_fill_sg(struct ata_queued_cmd *qc)
4773{
1da177e4 4774 struct ata_port *ap = qc->ap;
cedc9a47 4775 struct scatterlist *sg;
ff2aeb1e 4776 unsigned int si, pi;
1da177e4 4777
ff2aeb1e
TH
4778 pi = 0;
4779 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1da177e4
LT
4780 u32 addr, offset;
4781 u32 sg_len, len;
4782
4783 /* determine if physical DMA addr spans 64K boundary.
4784 * Note h/w doesn't support 64-bit, so we unconditionally
4785 * truncate dma_addr_t to u32.
4786 */
4787 addr = (u32) sg_dma_address(sg);
4788 sg_len = sg_dma_len(sg);
4789
4790 while (sg_len) {
4791 offset = addr & 0xffff;
4792 len = sg_len;
4793 if ((offset + sg_len) > 0x10000)
4794 len = 0x10000 - offset;
4795
ff2aeb1e
TH
4796 ap->prd[pi].addr = cpu_to_le32(addr);
4797 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4798 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
1da177e4 4799
ff2aeb1e 4800 pi++;
1da177e4
LT
4801 sg_len -= len;
4802 addr += len;
4803 }
4804 }
4805
ff2aeb1e 4806 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1da177e4 4807}
b9a4197e 4808
d26fc955
AC
4809/**
4810 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4811 * @qc: Metadata associated with taskfile to be transferred
4812 *
4813 * Fill PCI IDE PRD (scatter-gather) table with segments
4814 * associated with the current disk command. Perform the fill
4815 * so that we avoid writing any length 64K records for
4816 * controllers that don't follow the spec.
4817 *
4818 * LOCKING:
4819 * spin_lock_irqsave(host lock)
4820 *
4821 */
4822static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4823{
4824 struct ata_port *ap = qc->ap;
4825 struct scatterlist *sg;
ff2aeb1e 4826 unsigned int si, pi;
d26fc955 4827
ff2aeb1e
TH
4828 pi = 0;
4829 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d26fc955
AC
4830 u32 addr, offset;
4831 u32 sg_len, len, blen;
4832
2dcb407e 4833 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4834 * Note h/w doesn't support 64-bit, so we unconditionally
4835 * truncate dma_addr_t to u32.
4836 */
4837 addr = (u32) sg_dma_address(sg);
4838 sg_len = sg_dma_len(sg);
4839
4840 while (sg_len) {
4841 offset = addr & 0xffff;
4842 len = sg_len;
4843 if ((offset + sg_len) > 0x10000)
4844 len = 0x10000 - offset;
4845
4846 blen = len & 0xffff;
ff2aeb1e 4847 ap->prd[pi].addr = cpu_to_le32(addr);
d26fc955
AC
4848 if (blen == 0) {
4849 /* Some PATA chipsets like the CS5530 can't
4850 cope with 0x0000 meaning 64K as the spec says */
ff2aeb1e 4851 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
d26fc955 4852 blen = 0x8000;
ff2aeb1e 4853 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
d26fc955 4854 }
ff2aeb1e
TH
4855 ap->prd[pi].flags_len = cpu_to_le32(blen);
4856 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
d26fc955 4857
ff2aeb1e 4858 pi++;
d26fc955
AC
4859 sg_len -= len;
4860 addr += len;
4861 }
4862 }
4863
ff2aeb1e 4864 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
d26fc955
AC
4865}
4866
1da177e4
LT
4867/**
4868 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4869 * @qc: Metadata associated with taskfile to check
4870 *
780a87f7
JG
4871 * Allow low-level driver to filter ATA PACKET commands, returning
4872 * a status indicating whether or not it is OK to use DMA for the
4873 * supplied PACKET command.
4874 *
1da177e4 4875 * LOCKING:
cca3974e 4876 * spin_lock_irqsave(host lock)
0cba632b 4877 *
1da177e4
LT
4878 * RETURNS: 0 when ATAPI DMA can be used
4879 * nonzero otherwise
4880 */
4881int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4882{
4883 struct ata_port *ap = qc->ap;
b9a4197e
TH
4884
4885 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4886 * few ATAPI devices choke on such DMA requests.
4887 */
4888 if (unlikely(qc->nbytes & 15))
4889 return 1;
6f23a31d 4890
1da177e4 4891 if (ap->ops->check_atapi_dma)
b9a4197e 4892 return ap->ops->check_atapi_dma(qc);
1da177e4 4893
b9a4197e 4894 return 0;
1da177e4 4895}
b9a4197e 4896
31cc23b3
TH
4897/**
4898 * ata_std_qc_defer - Check whether a qc needs to be deferred
4899 * @qc: ATA command in question
4900 *
4901 * Non-NCQ commands cannot run with any other command, NCQ or
4902 * not. As upper layer only knows the queue depth, we are
4903 * responsible for maintaining exclusion. This function checks
4904 * whether a new command @qc can be issued.
4905 *
4906 * LOCKING:
4907 * spin_lock_irqsave(host lock)
4908 *
4909 * RETURNS:
4910 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4911 */
4912int ata_std_qc_defer(struct ata_queued_cmd *qc)
4913{
4914 struct ata_link *link = qc->dev->link;
4915
4916 if (qc->tf.protocol == ATA_PROT_NCQ) {
4917 if (!ata_tag_valid(link->active_tag))
4918 return 0;
4919 } else {
4920 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4921 return 0;
4922 }
4923
4924 return ATA_DEFER_LINK;
4925}
4926
1da177e4
LT
4927/**
4928 * ata_qc_prep - Prepare taskfile for submission
4929 * @qc: Metadata associated with taskfile to be prepared
4930 *
780a87f7
JG
4931 * Prepare ATA taskfile for submission.
4932 *
1da177e4 4933 * LOCKING:
cca3974e 4934 * spin_lock_irqsave(host lock)
1da177e4
LT
4935 */
4936void ata_qc_prep(struct ata_queued_cmd *qc)
4937{
4938 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4939 return;
4940
4941 ata_fill_sg(qc);
4942}
4943
d26fc955
AC
4944/**
4945 * ata_dumb_qc_prep - Prepare taskfile for submission
4946 * @qc: Metadata associated with taskfile to be prepared
4947 *
4948 * Prepare ATA taskfile for submission.
4949 *
4950 * LOCKING:
4951 * spin_lock_irqsave(host lock)
4952 */
4953void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4954{
4955 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4956 return;
4957
4958 ata_fill_sg_dumb(qc);
4959}
4960
/* no-op ->qc_prep for controllers that need no PRD/preparation work */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4962
0cba632b
JG
4963/**
4964 * ata_sg_init - Associate command with scatter-gather table.
4965 * @qc: Command to be associated
4966 * @sg: Scatter-gather table.
4967 * @n_elem: Number of elements in s/g table.
4968 *
4969 * Initialize the data-related elements of queued_cmd @qc
4970 * to point to a scatter-gather table @sg, containing @n_elem
4971 * elements.
4972 *
4973 * LOCKING:
cca3974e 4974 * spin_lock_irqsave(host lock)
0cba632b 4975 */
1da177e4
LT
4976void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4977 unsigned int n_elem)
4978{
ff2aeb1e 4979 qc->sg = sg;
1da177e4 4980 qc->n_elem = n_elem;
ff2aeb1e 4981 qc->cursg = qc->sg;
1da177e4
LT
4982}
4983
ff2aeb1e
TH
4984/**
4985 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4986 * @qc: Command with scatter-gather table to be mapped.
4987 *
4988 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4989 *
4990 * LOCKING:
4991 * spin_lock_irqsave(host lock)
4992 *
4993 * RETURNS:
4994 * Zero on success, negative on error.
4995 *
4996 */
4997static int ata_sg_setup(struct ata_queued_cmd *qc)
4998{
4999 struct ata_port *ap = qc->ap;
dde20207 5000 unsigned int n_elem;
ff2aeb1e
TH
5001
5002 VPRINTK("ENTER, ata%u\n", ap->print_id);
5003
dde20207
JB
5004 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5005 if (n_elem < 1)
5006 return -1;
ff2aeb1e 5007
dde20207 5008 DPRINTK("%d sg elements mapped\n", n_elem);
1da177e4 5009
dde20207 5010 qc->n_elem = n_elem;
f92a2636 5011 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4
LT
5012
5013 return 0;
5014}
5015
0baab86b 5016/**
c893a3ae 5017 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
5018 * @buf: Buffer to swap
5019 * @buf_words: Number of 16-bit words in buffer.
5020 *
5021 * Swap halves of 16-bit words if needed to convert from
5022 * little-endian byte order to native cpu byte order, or
5023 * vice-versa.
5024 *
5025 * LOCKING:
6f0ef4fa 5026 * Inherited from caller.
0baab86b 5027 */
1da177e4
LT
5028void swap_buf_le16(u16 *buf, unsigned int buf_words)
5029{
5030#ifdef __BIG_ENDIAN
5031 unsigned int i;
5032
5033 for (i = 0; i < buf_words; i++)
5034 buf[i] = le16_to_cpu(buf[i]);
5035#endif /* __BIG_ENDIAN */
5036}
5037
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
			   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			/* read a full word, keep only its low byte */
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			/* pad the odd byte into a full word before writing */
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		/* the odd byte still consumed a full word on the wire */
		words++;
	}

	return words << 1;
}
5083
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned int nbytes;
	unsigned long irq_flags;

	/* keep local interrupts off for the entire PIO burst */
	local_irq_save(irq_flags);
	nbytes = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(irq_flags);

	return nbytes;
}
5112
5113
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this sector finishes the transfer -> move the HSM to its
	 * final data state before touching the data register
	 */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem pages are permanently mapped - no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* advance progress counters and the sg cursor */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5169
07f6f7d0 5170/**
5a5dbd18 5171 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5172 * @qc: Command on going
5173 *
5a5dbd18 5174 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5175 * ATA device for the DRQ request.
5176 *
5177 * LOCKING:
5178 * Inherited from caller.
5179 */
1da177e4 5180
07f6f7d0
AL
5181static void ata_pio_sectors(struct ata_queued_cmd *qc)
5182{
5183 if (is_multi_taskfile(&qc->tf)) {
5184 /* READ/WRITE MULTIPLE */
5185 unsigned int nsect;
5186
587005de 5187 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5188
5a5dbd18 5189 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5190 qc->dev->multi_count);
07f6f7d0
AL
5191 while (nsect--)
5192 ata_pio_sector(qc);
5193 } else
5194 ata_pio_sector(qc);
4cc980b3
AL
5195
5196 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5197}
5198
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *	Also advances the HSM according to the command's
 *	ATAPI protocol, and kicks off bus-master DMA for
 *	ATAPI_PROT_DMA commands.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		/* data phase follows; keep polling/IRQ-driven HSM going */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		/* no data phase - next event is command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5234
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the
 *	scatter-gather list page by page.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -1 if the device supplied more data than the
 *	sg list can hold (trailing-data overrun).
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/* ran off the end of the sg list - record for EH */
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	/* the device may round an odd final chunk up to a full word,
	 * so clamp to avoid underflowing bytes
	 */
	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}
5314
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt reason the device reported,
 *	sanity-check them, then transfer that many bytes via
 *	__atapi_pio_bytes().  On any check failure the HSM is moved to
 *	HSM_ST_ERR with AC_ERR_HSM set so EH can recover.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	/* a zero byte count with DRQ set is a protocol violation */
	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5372
5373/**
c234fb00
AL
5374 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5375 * @ap: the target ata_port
5376 * @qc: qc on going
1da177e4 5377 *
c234fb00
AL
5378 * RETURNS:
5379 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5380 */
c234fb00
AL
5381
5382static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5383{
c234fb00
AL
5384 if (qc->tf.flags & ATA_TFLAG_POLLING)
5385 return 1;
1da177e4 5386
c234fb00
AL
5387 if (ap->hsm_task_state == HSM_ST_FIRST) {
5388 if (qc->tf.protocol == ATA_PROT_PIO &&
5389 (qc->tf.flags & ATA_TFLAG_WRITE))
5390 return 1;
1da177e4 5391
405e66b3 5392 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5393 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5394 return 1;
fe79e683
AL
5395 }
5396
c234fb00
AL
5397 return 0;
5398}
1da177e4 5399
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable IRQs before handing back */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation - let EH sort it out */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-EH path: no freeze machinery available */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5449
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one step, based on the
 *	current state in ap->hsm_task_state and the device @status.
 *	States chain via "goto fsm_start" until the machine either
 *	needs to wait for the next event or the command completes.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5691
/* Workqueue entry point for polled PIO: wait for BSY to clear (with a
 * short heuristic busy-wait, then a snooze, then a requeue), and step
 * the HSM until it no longer asks to be polled.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy - come back later rather than spin */
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5729
1da177e4
LT
5730/**
5731 * ata_qc_new - Request an available ATA command, for queueing
5732 * @ap: Port associated with device @dev
5733 * @dev: Device from whom we request an available command structure
5734 *
5735 * LOCKING:
0cba632b 5736 * None.
1da177e4
LT
5737 */
5738
5739static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5740{
5741 struct ata_queued_cmd *qc = NULL;
5742 unsigned int i;
5743
e3180499 5744 /* no command while frozen */
b51e9e5d 5745 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5746 return NULL;
5747
2ab7db1f
TH
5748 /* the last tag is reserved for internal command. */
5749 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5750 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5751 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5752 break;
5753 }
5754
5755 if (qc)
5756 qc->tag = i;
5757
5758 return qc;
5759}
5760
5761/**
5762 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5763 * @dev: Device from whom we request an available command structure
5764 *
5765 * LOCKING:
0cba632b 5766 * None.
1da177e4
LT
5767 */
5768
3373efd8 5769struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5770{
9af5c9c9 5771 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5772 struct ata_queued_cmd *qc;
5773
5774 qc = ata_qc_new(ap);
5775 if (qc) {
1da177e4
LT
5776 qc->scsicmd = NULL;
5777 qc->ap = ap;
5778 qc->dev = dev;
1da177e4 5779
2c13b7ce 5780 ata_qc_reinit(qc);
1da177e4
LT
5781 }
5782
5783 return qc;
5784}
5785
1da177e4
LT
5786/**
5787 * ata_qc_free - free unused ata_queued_cmd
5788 * @qc: Command to complete
5789 *
5790 * Designed to free unused ata_queued_cmd object
5791 * in case something prevents using it.
5792 *
5793 * LOCKING:
cca3974e 5794 * spin_lock_irqsave(host lock)
1da177e4
LT
5795 */
5796void ata_qc_free(struct ata_queued_cmd *qc)
5797{
4ba946e9
TH
5798 struct ata_port *ap = qc->ap;
5799 unsigned int tag;
5800
a4631474 5801 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5802
4ba946e9
TH
5803 qc->flags = 0;
5804 tag = qc->tag;
5805 if (likely(ata_tag_valid(tag))) {
4ba946e9 5806 qc->tag = ATA_TAG_POISON;
6cec4a39 5807 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5808 }
1da177e4
LT
5809}
5810
/* Low-level completion bookkeeping: unmap DMA, clear the link's
 * active-tag/sactive accounting and the port's active mask, then invoke
 * the qc's completion callback.  Called with the host lock held.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* last outstanding NCQ command on this link? */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5847
39599a53
TH
5848static void fill_result_tf(struct ata_queued_cmd *qc)
5849{
5850 struct ata_port *ap = qc->ap;
5851
39599a53 5852 qc->result_tf.flags = qc->tf.flags;
4742d54f 5853 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5854}
5855
00115e0f
TH
5856static void ata_verify_xfer(struct ata_queued_cmd *qc)
5857{
5858 struct ata_device *dev = qc->dev;
5859
5860 if (ata_tag_internal(qc->tag))
5861 return;
5862
5863 if (ata_is_nodata(qc->tf.protocol))
5864 return;
5865
5866 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5867 return;
5868
5869 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5870}
5871
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				/* EH owns the qc from here on */
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5958
dedaf2b0
TH
5959/**
5960 * ata_qc_complete_multiple - Complete multiple qcs successfully
5961 * @ap: port in question
5962 * @qc_active: new qc_active mask
5963 * @finish_qc: LLDD callback invoked before completing a qc
5964 *
5965 * Complete in-flight commands. This functions is meant to be
5966 * called from low-level driver's interrupt routine to complete
5967 * requests normally. ap->qc_active and @qc_active is compared
5968 * and commands are completed accordingly.
5969 *
5970 * LOCKING:
cca3974e 5971 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5972 *
5973 * RETURNS:
5974 * Number of completed commands on success, -errno otherwise.
5975 */
5976int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5977 void (*finish_qc)(struct ata_queued_cmd *))
5978{
5979 int nr_done = 0;
5980 u32 done_mask;
5981 int i;
5982
5983 done_mask = ap->qc_active ^ qc_active;
5984
5985 if (unlikely(done_mask & qc_active)) {
5986 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5987 "(%08x->%08x)\n", ap->qc_active, qc_active);
5988 return -EINVAL;
5989 }
5990
5991 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5992 struct ata_queued_cmd *qc;
5993
5994 if (!(done_mask & (1 << i)))
5995 continue;
5996
5997 if ((qc = ata_qc_from_tag(ap, i))) {
5998 if (finish_qc)
5999 finish_qc(qc);
6000 ata_qc_complete(qc);
6001 nr_done++;
6002 }
6003 }
6004
6005 return nr_done;
6006}
6007
1da177e4
LT
6008/**
6009 * ata_qc_issue - issue taskfile to device
6010 * @qc: command to issue to device
6011 *
6012 * Prepare an ATA command to submission to device.
6013 * This includes mapping the data into a DMA-able
6014 * area, filling in the S/G table, and finally
6015 * writing the taskfile to hardware, starting the command.
6016 *
6017 * LOCKING:
cca3974e 6018 * spin_lock_irqsave(host lock)
1da177e4 6019 */
8e0e694a 6020void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
6021{
6022 struct ata_port *ap = qc->ap;
9af5c9c9 6023 struct ata_link *link = qc->dev->link;
405e66b3 6024 u8 prot = qc->tf.protocol;
1da177e4 6025
dedaf2b0
TH
6026 /* Make sure only one non-NCQ command is outstanding. The
6027 * check is skipped for old EH because it reuses active qc to
6028 * request ATAPI sense.
6029 */
9af5c9c9 6030 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 6031
1973a023 6032 if (ata_is_ncq(prot)) {
9af5c9c9 6033 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
6034
6035 if (!link->sactive)
6036 ap->nr_active_links++;
9af5c9c9 6037 link->sactive |= 1 << qc->tag;
dedaf2b0 6038 } else {
9af5c9c9 6039 WARN_ON(link->sactive);
da917d69
TH
6040
6041 ap->nr_active_links++;
9af5c9c9 6042 link->active_tag = qc->tag;
dedaf2b0
TH
6043 }
6044
e4a70e76 6045 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 6046 ap->qc_active |= 1 << qc->tag;
e4a70e76 6047
f92a2636
TH
6048 /* We guarantee to LLDs that they will have at least one
6049 * non-zero sg if the command is a data command.
6050 */
ff2aeb1e 6051 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
f92a2636 6052
405e66b3 6053 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 6054 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7
TH
6055 if (ata_sg_setup(qc))
6056 goto sg_err;
1da177e4 6057
054a5fba
TH
6058 /* if device is sleeping, schedule softreset and abort the link */
6059 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6060 link->eh_info.action |= ATA_EH_SOFTRESET;
6061 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6062 ata_link_abort(link);
6063 return;
6064 }
6065
1da177e4
LT
6066 ap->ops->qc_prep(qc);
6067
8e0e694a
TH
6068 qc->err_mask |= ap->ops->qc_issue(qc);
6069 if (unlikely(qc->err_mask))
6070 goto err;
6071 return;
1da177e4 6072
8e436af9 6073sg_err:
8e0e694a
TH
6074 qc->err_mask |= AC_ERR_SYSTEM;
6075err:
6076 ata_qc_complete(qc);
1da177e4
LT
6077}
6078
6079/**
6080 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6081 * @qc: command to issue to device
6082 *
6083 * Using various libata functions and hooks, this function
6084 * starts an ATA command. ATA commands are grouped into
6085 * classes called "protocols", and issuing each type of protocol
6086 * is slightly different.
6087 *
0baab86b
EF
6088 * May be used as the qc_issue() entry in ata_port_operations.
6089 *
1da177e4 6090 * LOCKING:
cca3974e 6091 * spin_lock_irqsave(host lock)
1da177e4
LT
6092 *
6093 * RETURNS:
9a3d9eb0 6094 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6095 */
6096
9a3d9eb0 6097unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6098{
6099 struct ata_port *ap = qc->ap;
6100
e50362ec
AL
6101 /* Use polling pio if the LLD doesn't handle
6102 * interrupt driven pio and atapi CDB interrupt.
6103 */
6104 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6105 switch (qc->tf.protocol) {
6106 case ATA_PROT_PIO:
e3472cbe 6107 case ATA_PROT_NODATA:
0dc36888
TH
6108 case ATAPI_PROT_PIO:
6109 case ATAPI_PROT_NODATA:
e50362ec
AL
6110 qc->tf.flags |= ATA_TFLAG_POLLING;
6111 break;
0dc36888 6112 case ATAPI_PROT_DMA:
e50362ec 6113 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6114 /* see ata_dma_blacklisted() */
e50362ec
AL
6115 BUG();
6116 break;
6117 default:
6118 break;
6119 }
6120 }
6121
312f7da2 6122 /* select the device */
1da177e4
LT
6123 ata_dev_select(ap, qc->dev->devno, 1, 0);
6124
312f7da2 6125 /* start the command */
1da177e4
LT
6126 switch (qc->tf.protocol) {
6127 case ATA_PROT_NODATA:
312f7da2
AL
6128 if (qc->tf.flags & ATA_TFLAG_POLLING)
6129 ata_qc_set_polling(qc);
6130
e5338254 6131 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6132 ap->hsm_task_state = HSM_ST_LAST;
6133
6134 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6135 ata_pio_queue_task(ap, qc, 0);
312f7da2 6136
1da177e4
LT
6137 break;
6138
6139 case ATA_PROT_DMA:
587005de 6140 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6141
1da177e4
LT
6142 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6143 ap->ops->bmdma_setup(qc); /* set up bmdma */
6144 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6145 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6146 break;
6147
312f7da2
AL
6148 case ATA_PROT_PIO:
6149 if (qc->tf.flags & ATA_TFLAG_POLLING)
6150 ata_qc_set_polling(qc);
1da177e4 6151
e5338254 6152 ata_tf_to_host(ap, &qc->tf);
312f7da2 6153
54f00389
AL
6154 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6155 /* PIO data out protocol */
6156 ap->hsm_task_state = HSM_ST_FIRST;
442eacc3 6157 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6158
6159 /* always send first data block using
e27486db 6160 * the ata_pio_task() codepath.
54f00389 6161 */
312f7da2 6162 } else {
54f00389
AL
6163 /* PIO data in protocol */
6164 ap->hsm_task_state = HSM_ST;
6165
6166 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6167 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6168
6169 /* if polling, ata_pio_task() handles the rest.
6170 * otherwise, interrupt handler takes over from here.
6171 */
312f7da2
AL
6172 }
6173
1da177e4
LT
6174 break;
6175
0dc36888
TH
6176 case ATAPI_PROT_PIO:
6177 case ATAPI_PROT_NODATA:
312f7da2
AL
6178 if (qc->tf.flags & ATA_TFLAG_POLLING)
6179 ata_qc_set_polling(qc);
6180
e5338254 6181 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6182
312f7da2
AL
6183 ap->hsm_task_state = HSM_ST_FIRST;
6184
6185 /* send cdb by polling if no cdb interrupt */
6186 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6187 (qc->tf.flags & ATA_TFLAG_POLLING))
442eacc3 6188 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6189 break;
6190
0dc36888 6191 case ATAPI_PROT_DMA:
587005de 6192 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6193
1da177e4
LT
6194 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6195 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6196 ap->hsm_task_state = HSM_ST_FIRST;
6197
6198 /* send cdb by polling if no cdb interrupt */
6199 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
442eacc3 6200 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6201 break;
6202
6203 default:
6204 WARN_ON(1);
9a3d9eb0 6205 return AC_ERR_SYSTEM;
1da177e4
LT
6206 }
6207
6208 return 0;
6209}
6210
1da177e4
LT
6211/**
6212 * ata_host_intr - Handle host interrupt for given (port, task)
6213 * @ap: Port on which interrupt arrived (possibly...)
6214 * @qc: Taskfile currently active in engine
6215 *
6216 * Handle host interrupt for given queued command. Currently,
6217 * only DMA interrupts are handled. All other commands are
6218 * handled via polling with interrupts disabled (nIEN bit).
6219 *
6220 * LOCKING:
cca3974e 6221 * spin_lock_irqsave(host lock)
1da177e4
LT
6222 *
6223 * RETURNS:
6224 * One if interrupt was handled, zero if not (shared irq).
6225 */
6226
2dcb407e
JG
6227inline unsigned int ata_host_intr(struct ata_port *ap,
6228 struct ata_queued_cmd *qc)
1da177e4 6229{
9af5c9c9 6230 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6231 u8 status, host_stat = 0;
1da177e4 6232
312f7da2 6233 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6234 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6235
312f7da2
AL
6236 /* Check whether we are expecting interrupt in this state */
6237 switch (ap->hsm_task_state) {
6238 case HSM_ST_FIRST:
6912ccd5
AL
6239 /* Some pre-ATAPI-4 devices assert INTRQ
6240 * at this state when ready to receive CDB.
6241 */
1da177e4 6242
312f7da2 6243 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
405e66b3
TH
6244 * The flag was turned on only for atapi devices. No
6245 * need to check ata_is_atapi(qc->tf.protocol) again.
312f7da2
AL
6246 */
6247 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6248 goto idle_irq;
1da177e4 6249 break;
312f7da2
AL
6250 case HSM_ST_LAST:
6251 if (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6252 qc->tf.protocol == ATAPI_PROT_DMA) {
312f7da2
AL
6253 /* check status of DMA engine */
6254 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6255 VPRINTK("ata%u: host_stat 0x%X\n",
6256 ap->print_id, host_stat);
312f7da2
AL
6257
6258 /* if it's not our irq... */
6259 if (!(host_stat & ATA_DMA_INTR))
6260 goto idle_irq;
6261
6262 /* before we do anything else, clear DMA-Start bit */
6263 ap->ops->bmdma_stop(qc);
a4f16610
AL
6264
6265 if (unlikely(host_stat & ATA_DMA_ERR)) {
6266 /* error when transfering data to/from memory */
6267 qc->err_mask |= AC_ERR_HOST_BUS;
6268 ap->hsm_task_state = HSM_ST_ERR;
6269 }
312f7da2
AL
6270 }
6271 break;
6272 case HSM_ST:
6273 break;
1da177e4
LT
6274 default:
6275 goto idle_irq;
6276 }
6277
312f7da2
AL
6278 /* check altstatus */
6279 status = ata_altstatus(ap);
6280 if (status & ATA_BUSY)
6281 goto idle_irq;
1da177e4 6282
312f7da2
AL
6283 /* check main status, clearing INTRQ */
6284 status = ata_chk_status(ap);
6285 if (unlikely(status & ATA_BUSY))
6286 goto idle_irq;
1da177e4 6287
312f7da2
AL
6288 /* ack bmdma irq events */
6289 ap->ops->irq_clear(ap);
1da177e4 6290
bb5cb290 6291 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6292
6293 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6294 qc->tf.protocol == ATAPI_PROT_DMA))
ea54763f
TH
6295 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6296
1da177e4
LT
6297 return 1; /* irq handled */
6298
6299idle_irq:
6300 ap->stats.idle_irq++;
6301
6302#ifdef ATA_IRQ_TRAP
6303 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6304 ata_chk_status(ap);
6305 ap->ops->irq_clear(ap);
f15a1daf 6306 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6307 return 1;
1da177e4
LT
6308 }
6309#endif
6310 return 0; /* irq not handled */
6311}
6312
6313/**
6314 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6315 * @irq: irq line (unused)
cca3974e 6316 * @dev_instance: pointer to our ata_host information structure
1da177e4 6317 *
0cba632b
JG
6318 * Default interrupt handler for PCI IDE devices. Calls
6319 * ata_host_intr() for each port that is not disabled.
6320 *
1da177e4 6321 * LOCKING:
cca3974e 6322 * Obtains host lock during operation.
1da177e4
LT
6323 *
6324 * RETURNS:
0cba632b 6325 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6326 */
6327
2dcb407e 6328irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6329{
cca3974e 6330 struct ata_host *host = dev_instance;
1da177e4
LT
6331 unsigned int i;
6332 unsigned int handled = 0;
6333 unsigned long flags;
6334
6335 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6336 spin_lock_irqsave(&host->lock, flags);
1da177e4 6337
cca3974e 6338 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6339 struct ata_port *ap;
6340
cca3974e 6341 ap = host->ports[i];
c1389503 6342 if (ap &&
029f5468 6343 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6344 struct ata_queued_cmd *qc;
6345
9af5c9c9 6346 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6347 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6348 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6349 handled |= ata_host_intr(ap, qc);
6350 }
6351 }
6352
cca3974e 6353 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6354
6355 return IRQ_RETVAL(handled);
6356}
6357
34bf2170
TH
6358/**
6359 * sata_scr_valid - test whether SCRs are accessible
936fd732 6360 * @link: ATA link to test SCR accessibility for
34bf2170 6361 *
936fd732 6362 * Test whether SCRs are accessible for @link.
34bf2170
TH
6363 *
6364 * LOCKING:
6365 * None.
6366 *
6367 * RETURNS:
6368 * 1 if SCRs are accessible, 0 otherwise.
6369 */
936fd732 6370int sata_scr_valid(struct ata_link *link)
34bf2170 6371{
936fd732
TH
6372 struct ata_port *ap = link->ap;
6373
a16abc0b 6374 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6375}
6376
6377/**
6378 * sata_scr_read - read SCR register of the specified port
936fd732 6379 * @link: ATA link to read SCR for
34bf2170
TH
6380 * @reg: SCR to read
6381 * @val: Place to store read value
6382 *
936fd732 6383 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6384 * guaranteed to succeed if @link is ap->link, the cable type of
6385 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6386 *
6387 * LOCKING:
633273a3 6388 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6389 *
6390 * RETURNS:
6391 * 0 on success, negative errno on failure.
6392 */
936fd732 6393int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6394{
633273a3
TH
6395 if (ata_is_host_link(link)) {
6396 struct ata_port *ap = link->ap;
936fd732 6397
633273a3
TH
6398 if (sata_scr_valid(link))
6399 return ap->ops->scr_read(ap, reg, val);
6400 return -EOPNOTSUPP;
6401 }
6402
6403 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6404}
6405
6406/**
6407 * sata_scr_write - write SCR register of the specified port
936fd732 6408 * @link: ATA link to write SCR for
34bf2170
TH
6409 * @reg: SCR to write
6410 * @val: value to write
6411 *
936fd732 6412 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6413 * guaranteed to succeed if @link is ap->link, the cable type of
6414 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6415 *
6416 * LOCKING:
633273a3 6417 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6418 *
6419 * RETURNS:
6420 * 0 on success, negative errno on failure.
6421 */
936fd732 6422int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6423{
633273a3
TH
6424 if (ata_is_host_link(link)) {
6425 struct ata_port *ap = link->ap;
6426
6427 if (sata_scr_valid(link))
6428 return ap->ops->scr_write(ap, reg, val);
6429 return -EOPNOTSUPP;
6430 }
936fd732 6431
633273a3 6432 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6433}
6434
6435/**
6436 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6437 * @link: ATA link to write SCR for
34bf2170
TH
6438 * @reg: SCR to write
6439 * @val: value to write
6440 *
6441 * This function is identical to sata_scr_write() except that this
6442 * function performs flush after writing to the register.
6443 *
6444 * LOCKING:
633273a3 6445 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6446 *
6447 * RETURNS:
6448 * 0 on success, negative errno on failure.
6449 */
936fd732 6450int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6451{
633273a3
TH
6452 if (ata_is_host_link(link)) {
6453 struct ata_port *ap = link->ap;
6454 int rc;
da3dbb17 6455
633273a3
TH
6456 if (sata_scr_valid(link)) {
6457 rc = ap->ops->scr_write(ap, reg, val);
6458 if (rc == 0)
6459 rc = ap->ops->scr_read(ap, reg, &val);
6460 return rc;
6461 }
6462 return -EOPNOTSUPP;
34bf2170 6463 }
633273a3
TH
6464
6465 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6466}
6467
6468/**
936fd732
TH
6469 * ata_link_online - test whether the given link is online
6470 * @link: ATA link to test
34bf2170 6471 *
936fd732
TH
6472 * Test whether @link is online. Note that this function returns
6473 * 0 if online status of @link cannot be obtained, so
6474 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6475 *
6476 * LOCKING:
6477 * None.
6478 *
6479 * RETURNS:
6480 * 1 if the port online status is available and online.
6481 */
936fd732 6482int ata_link_online(struct ata_link *link)
34bf2170
TH
6483{
6484 u32 sstatus;
6485
936fd732
TH
6486 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6487 (sstatus & 0xf) == 0x3)
34bf2170
TH
6488 return 1;
6489 return 0;
6490}
6491
6492/**
936fd732
TH
6493 * ata_link_offline - test whether the given link is offline
6494 * @link: ATA link to test
34bf2170 6495 *
936fd732
TH
6496 * Test whether @link is offline. Note that this function
6497 * returns 0 if offline status of @link cannot be obtained, so
6498 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6499 *
6500 * LOCKING:
6501 * None.
6502 *
6503 * RETURNS:
6504 * 1 if the port offline status is available and offline.
6505 */
936fd732 6506int ata_link_offline(struct ata_link *link)
34bf2170
TH
6507{
6508 u32 sstatus;
6509
936fd732
TH
6510 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6511 (sstatus & 0xf) != 0x3)
34bf2170
TH
6512 return 1;
6513 return 0;
6514}
0baab86b 6515
77b08fb5 6516int ata_flush_cache(struct ata_device *dev)
9b847548 6517{
977e6b9f 6518 unsigned int err_mask;
9b847548
JA
6519 u8 cmd;
6520
6521 if (!ata_try_flush_cache(dev))
6522 return 0;
6523
6fc49adb 6524 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6525 cmd = ATA_CMD_FLUSH_EXT;
6526 else
6527 cmd = ATA_CMD_FLUSH;
6528
4f34337b
AC
6529 /* This is wrong. On a failed flush we get back the LBA of the lost
6530 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6531 a further flush command to continue the writeback until it
4f34337b 6532 does not error */
977e6b9f
TH
6533 err_mask = ata_do_simple_cmd(dev, cmd);
6534 if (err_mask) {
6535 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6536 return -EIO;
6537 }
6538
6539 return 0;
9b847548
JA
6540}
6541
6ffa01d8 6542#ifdef CONFIG_PM
cca3974e
JG
6543static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6544 unsigned int action, unsigned int ehi_flags,
6545 int wait)
500530f6
TH
6546{
6547 unsigned long flags;
6548 int i, rc;
6549
cca3974e
JG
6550 for (i = 0; i < host->n_ports; i++) {
6551 struct ata_port *ap = host->ports[i];
e3667ebf 6552 struct ata_link *link;
500530f6
TH
6553
6554 /* Previous resume operation might still be in
6555 * progress. Wait for PM_PENDING to clear.
6556 */
6557 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6558 ata_port_wait_eh(ap);
6559 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6560 }
6561
6562 /* request PM ops to EH */
6563 spin_lock_irqsave(ap->lock, flags);
6564
6565 ap->pm_mesg = mesg;
6566 if (wait) {
6567 rc = 0;
6568 ap->pm_result = &rc;
6569 }
6570
6571 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6572 __ata_port_for_each_link(link, ap) {
6573 link->eh_info.action |= action;
6574 link->eh_info.flags |= ehi_flags;
6575 }
500530f6
TH
6576
6577 ata_port_schedule_eh(ap);
6578
6579 spin_unlock_irqrestore(ap->lock, flags);
6580
6581 /* wait and check result */
6582 if (wait) {
6583 ata_port_wait_eh(ap);
6584 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6585 if (rc)
6586 return rc;
6587 }
6588 }
6589
6590 return 0;
6591}
6592
6593/**
cca3974e
JG
6594 * ata_host_suspend - suspend host
6595 * @host: host to suspend
500530f6
TH
6596 * @mesg: PM message
6597 *
cca3974e 6598 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6599 * function requests EH to perform PM operations and waits for EH
6600 * to finish.
6601 *
6602 * LOCKING:
6603 * Kernel thread context (may sleep).
6604 *
6605 * RETURNS:
6606 * 0 on success, -errno on failure.
6607 */
cca3974e 6608int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6609{
9666f400 6610 int rc;
500530f6 6611
ca77329f
KCA
6612 /*
6613 * disable link pm on all ports before requesting
6614 * any pm activity
6615 */
6616 ata_lpm_enable(host);
6617
cca3974e 6618 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
6619 if (rc == 0)
6620 host->dev->power.power_state = mesg;
500530f6
TH
6621 return rc;
6622}
6623
6624/**
cca3974e
JG
6625 * ata_host_resume - resume host
6626 * @host: host to resume
500530f6 6627 *
cca3974e 6628 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6629 * function requests EH to perform PM operations and returns.
6630 * Note that all resume operations are performed parallely.
6631 *
6632 * LOCKING:
6633 * Kernel thread context (may sleep).
6634 */
cca3974e 6635void ata_host_resume(struct ata_host *host)
500530f6 6636{
cca3974e
JG
6637 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6638 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 6639 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6640
6641 /* reenable link pm */
6642 ata_lpm_disable(host);
500530f6 6643}
6ffa01d8 6644#endif
500530f6 6645
c893a3ae
RD
6646/**
6647 * ata_port_start - Set port up for dma.
6648 * @ap: Port to initialize
6649 *
6650 * Called just after data structures for each port are
6651 * initialized. Allocates space for PRD table.
6652 *
6653 * May be used as the port_start() entry in ata_port_operations.
6654 *
6655 * LOCKING:
6656 * Inherited from caller.
6657 */
f0d36efd 6658int ata_port_start(struct ata_port *ap)
1da177e4 6659{
2f1f610b 6660 struct device *dev = ap->dev;
1da177e4 6661
f0d36efd
TH
6662 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6663 GFP_KERNEL);
1da177e4
LT
6664 if (!ap->prd)
6665 return -ENOMEM;
6666
1da177e4
LT
6667 return 0;
6668}
6669
3ef3b43d
TH
6670/**
6671 * ata_dev_init - Initialize an ata_device structure
6672 * @dev: Device structure to initialize
6673 *
6674 * Initialize @dev in preparation for probing.
6675 *
6676 * LOCKING:
6677 * Inherited from caller.
6678 */
6679void ata_dev_init(struct ata_device *dev)
6680{
9af5c9c9
TH
6681 struct ata_link *link = dev->link;
6682 struct ata_port *ap = link->ap;
72fa4b74
TH
6683 unsigned long flags;
6684
5a04bf4b 6685 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6686 link->sata_spd_limit = link->hw_sata_spd_limit;
6687 link->sata_spd = 0;
5a04bf4b 6688
72fa4b74
TH
6689 /* High bits of dev->flags are used to record warm plug
6690 * requests which occur asynchronously. Synchronize using
cca3974e 6691 * host lock.
72fa4b74 6692 */
ba6a1308 6693 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6694 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6695 dev->horkage = 0;
ba6a1308 6696 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6697
72fa4b74
TH
6698 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6699 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6700 dev->pio_mask = UINT_MAX;
6701 dev->mwdma_mask = UINT_MAX;
6702 dev->udma_mask = UINT_MAX;
6703}
6704
4fb37a25
TH
6705/**
6706 * ata_link_init - Initialize an ata_link structure
6707 * @ap: ATA port link is attached to
6708 * @link: Link structure to initialize
8989805d 6709 * @pmp: Port multiplier port number
4fb37a25
TH
6710 *
6711 * Initialize @link.
6712 *
6713 * LOCKING:
6714 * Kernel thread context (may sleep)
6715 */
fb7fd614 6716void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6717{
6718 int i;
6719
6720 /* clear everything except for devices */
6721 memset(link, 0, offsetof(struct ata_link, device[0]));
6722
6723 link->ap = ap;
8989805d 6724 link->pmp = pmp;
4fb37a25
TH
6725 link->active_tag = ATA_TAG_POISON;
6726 link->hw_sata_spd_limit = UINT_MAX;
6727
6728 /* can't use iterator, ap isn't initialized yet */
6729 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6730 struct ata_device *dev = &link->device[i];
6731
6732 dev->link = link;
6733 dev->devno = dev - link->device;
6734 ata_dev_init(dev);
6735 }
6736}
6737
6738/**
6739 * sata_link_init_spd - Initialize link->sata_spd_limit
6740 * @link: Link to configure sata_spd_limit for
6741 *
6742 * Initialize @link->[hw_]sata_spd_limit to the currently
6743 * configured value.
6744 *
6745 * LOCKING:
6746 * Kernel thread context (may sleep).
6747 *
6748 * RETURNS:
6749 * 0 on success, -errno on failure.
6750 */
fb7fd614 6751int sata_link_init_spd(struct ata_link *link)
4fb37a25 6752{
33267325
TH
6753 u32 scontrol;
6754 u8 spd;
4fb37a25
TH
6755 int rc;
6756
6757 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6758 if (rc)
6759 return rc;
6760
6761 spd = (scontrol >> 4) & 0xf;
6762 if (spd)
6763 link->hw_sata_spd_limit &= (1 << spd) - 1;
6764
33267325
TH
6765 ata_force_spd_limit(link);
6766
4fb37a25
TH
6767 link->sata_spd_limit = link->hw_sata_spd_limit;
6768
6769 return 0;
6770}
6771
1da177e4 6772/**
f3187195
TH
6773 * ata_port_alloc - allocate and initialize basic ATA port resources
6774 * @host: ATA host this allocated port belongs to
1da177e4 6775 *
f3187195
TH
6776 * Allocate and initialize basic ATA port resources.
6777 *
6778 * RETURNS:
6779 * Allocate ATA port on success, NULL on failure.
0cba632b 6780 *
1da177e4 6781 * LOCKING:
f3187195 6782 * Inherited from calling layer (may sleep).
1da177e4 6783 */
f3187195 6784struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6785{
f3187195 6786 struct ata_port *ap;
1da177e4 6787
f3187195
TH
6788 DPRINTK("ENTER\n");
6789
6790 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6791 if (!ap)
6792 return NULL;
6793
f4d6d004 6794 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6795 ap->lock = &host->lock;
198e0fed 6796 ap->flags = ATA_FLAG_DISABLED;
f3187195 6797 ap->print_id = -1;
1da177e4 6798 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6799 ap->host = host;
f3187195 6800 ap->dev = host->dev;
1da177e4 6801 ap->last_ctl = 0xFF;
bd5d825c
BP
6802
6803#if defined(ATA_VERBOSE_DEBUG)
6804 /* turn on all debugging levels */
6805 ap->msg_enable = 0x00FF;
6806#elif defined(ATA_DEBUG)
6807 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6808#else
0dd4b21f 6809 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6810#endif
1da177e4 6811
442eacc3 6812 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
6813 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6814 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6815 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6816 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6817 init_timer_deferrable(&ap->fastdrain_timer);
6818 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6819 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6820
838df628 6821 ap->cbl = ATA_CBL_NONE;
838df628 6822
8989805d 6823 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6824
6825#ifdef ATA_IRQ_TRAP
6826 ap->stats.unhandled_irq = 1;
6827 ap->stats.idle_irq = 1;
6828#endif
1da177e4 6829 return ap;
1da177e4
LT
6830}
6831
f0d36efd
TH
6832static void ata_host_release(struct device *gendev, void *res)
6833{
6834 struct ata_host *host = dev_get_drvdata(gendev);
6835 int i;
6836
1aa506e4
TH
6837 for (i = 0; i < host->n_ports; i++) {
6838 struct ata_port *ap = host->ports[i];
6839
4911487a
TH
6840 if (!ap)
6841 continue;
6842
6843 if (ap->scsi_host)
1aa506e4
TH
6844 scsi_host_put(ap->scsi_host);
6845
633273a3 6846 kfree(ap->pmp_link);
4911487a 6847 kfree(ap);
1aa506e4
TH
6848 host->ports[i] = NULL;
6849 }
6850
1aa56cca 6851 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6852}
6853
f3187195
TH
6854/**
6855 * ata_host_alloc - allocate and init basic ATA host resources
6856 * @dev: generic device this host is associated with
6857 * @max_ports: maximum number of ATA ports associated with this host
6858 *
6859 * Allocate and initialize basic ATA host resources. LLD calls
6860 * this function to allocate a host, initializes it fully and
6861 * attaches it using ata_host_register().
6862 *
6863 * @max_ports ports are allocated and host->n_ports is
6864 * initialized to @max_ports. The caller is allowed to decrease
6865 * host->n_ports before calling ata_host_register(). The unused
6866 * ports will be automatically freed on registration.
6867 *
6868 * RETURNS:
6869 * Allocate ATA host on success, NULL on failure.
6870 *
6871 * LOCKING:
6872 * Inherited from calling layer (may sleep).
6873 */
6874struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6875{
6876 struct ata_host *host;
6877 size_t sz;
6878 int i;
6879
6880 DPRINTK("ENTER\n");
6881
6882 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6883 return NULL;
6884
6885 /* alloc a container for our list of ATA ports (buses) */
6886 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6887 /* alloc a container for our list of ATA ports (buses) */
6888 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6889 if (!host)
6890 goto err_out;
6891
6892 devres_add(dev, host);
6893 dev_set_drvdata(dev, host);
6894
6895 spin_lock_init(&host->lock);
6896 host->dev = dev;
6897 host->n_ports = max_ports;
6898
6899 /* allocate ports bound to this host */
6900 for (i = 0; i < max_ports; i++) {
6901 struct ata_port *ap;
6902
6903 ap = ata_port_alloc(host);
6904 if (!ap)
6905 goto err_out;
6906
6907 ap->port_no = i;
6908 host->ports[i] = ap;
6909 }
6910
6911 devres_remove_group(dev, NULL);
6912 return host;
6913
6914 err_out:
6915 devres_release_group(dev, NULL);
6916 return NULL;
6917}
6918
f5cda257
TH
6919/**
6920 * ata_host_alloc_pinfo - alloc host and init with port_info array
6921 * @dev: generic device this host is associated with
6922 * @ppi: array of ATA port_info to initialize host with
6923 * @n_ports: number of ATA ports attached to this host
6924 *
6925 * Allocate ATA host and initialize with info from @ppi. If NULL
6926 * terminated, @ppi may contain fewer entries than @n_ports. The
6927 * last entry will be used for the remaining ports.
6928 *
6929 * RETURNS:
6930 * Allocate ATA host on success, NULL on failure.
6931 *
6932 * LOCKING:
6933 * Inherited from calling layer (may sleep).
6934 */
6935struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6936 const struct ata_port_info * const * ppi,
6937 int n_ports)
6938{
6939 const struct ata_port_info *pi;
6940 struct ata_host *host;
6941 int i, j;
6942
6943 host = ata_host_alloc(dev, n_ports);
6944 if (!host)
6945 return NULL;
6946
6947 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6948 struct ata_port *ap = host->ports[i];
6949
6950 if (ppi[j])
6951 pi = ppi[j++];
6952
6953 ap->pio_mask = pi->pio_mask;
6954 ap->mwdma_mask = pi->mwdma_mask;
6955 ap->udma_mask = pi->udma_mask;
6956 ap->flags |= pi->flags;
0c88758b 6957 ap->link.flags |= pi->link_flags;
f5cda257
TH
6958 ap->ops = pi->port_ops;
6959
6960 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6961 host->ops = pi->port_ops;
6962 if (!host->private_data && pi->private_data)
6963 host->private_data = pi->private_data;
6964 }
6965
6966 return host;
6967}
6968
32ebbc0c
TH
6969static void ata_host_stop(struct device *gendev, void *res)
6970{
6971 struct ata_host *host = dev_get_drvdata(gendev);
6972 int i;
6973
6974 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6975
6976 for (i = 0; i < host->n_ports; i++) {
6977 struct ata_port *ap = host->ports[i];
6978
6979 if (ap->ops->port_stop)
6980 ap->ops->port_stop(ap);
6981 }
6982
6983 if (host->ops->host_stop)
6984 host->ops->host_stop(host);
6985}
6986
ecef7253
TH
6987/**
6988 * ata_host_start - start and freeze ports of an ATA host
6989 * @host: ATA host to start ports for
6990 *
6991 * Start and then freeze ports of @host. Started status is
6992 * recorded in host->flags, so this function can be called
6993 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6994 * once. If host->ops isn't initialized yet, its set to the
6995 * first non-dummy port ops.
ecef7253
TH
6996 *
6997 * LOCKING:
6998 * Inherited from calling layer (may sleep).
6999 *
7000 * RETURNS:
7001 * 0 if all ports are started successfully, -errno otherwise.
7002 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;	/* devres entry whose release is ata_host_stop */
	int i, rc;

	/* idempotent - ports are only ever started once */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	/* first pass: adopt the first non-dummy port ops as host->ops
	 * (if unset) and find out whether any stop hook exists */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop devres up front so that a late allocation
	 * failure can't leave started ports without a stop hook; it is
	 * only devres_add()ed after every port started successfully */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	/* second pass: start and freeze each port */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				/* -ENODEV means "port not present" - quiet */
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that did start, in reverse order */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
7062
b03732f0 7063/**
cca3974e
JG
7064 * ata_sas_host_init - Initialize a host struct
7065 * @host: host to initialize
7066 * @dev: device host is attached to
7067 * @flags: host flags
7068 * @ops: port_ops
b03732f0
BK
7069 *
7070 * LOCKING:
7071 * PCI/etc. bus probe sem.
7072 *
7073 */
f3187195 7074/* KILLME - the only user left is ipr */
cca3974e
JG
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	/* bare-bones init for callers that don't go through
	 * ata_host_alloc() (SAS/ipr path per the KILLME note above):
	 * no ports are allocated and no devres is involved */
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
7083
f3187195
TH
7084/**
7085 * ata_host_register - register initialized ATA host
7086 * @host: ATA host to register
7087 * @sht: template for SCSI host
7088 *
7089 * Register initialized ATA host. @host is allocated using
7090 * ata_host_alloc() and fully initialized by LLD. This function
7091 * starts ports, registers @host with ATA and SCSI layers and
7092 * probe registered devices.
7093 *
7094 * LOCKING:
7095 * Inherited from calling layer (may sleep).
7096 *
7097 * RETURNS:
7098 * 0 on success, -errno otherwise.
7099 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 * NOTE(review): the freed slots are not NULLed here; nothing
	 * below iterates past n_ports, but confirm ata_host_release()
	 * also stops at n_ports before touching this.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			/* the desc buffer was only for this banner */
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* probe: new-style (EH-driven) vs old-style bus probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			/* probe every possible device on the link */
			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
7212
f5cda257
TH
7213/**
7214 * ata_host_activate - start host, request IRQ and register it
7215 * @host: target ATA host
7216 * @irq: IRQ to request
7217 * @irq_handler: irq_handler used when requesting IRQ
7218 * @irq_flags: irq_flags used when requesting IRQ
7219 * @sht: scsi_host_template to use when registering the host
7220 *
7221 * After allocating an ATA host and initializing it, most libata
7222 * LLDs perform three steps to activate the host - start host,
7223 * request IRQ and register it. This helper takes necessasry
7224 * arguments and performs the three steps in one go.
7225 *
3d46b2e2
PM
7226 * An invalid IRQ skips the IRQ registration and expects the host to
7227 * have set polling mode on the port. In this case, @irq_handler
7228 * should be NULL.
7229 *
f5cda257
TH
7230 * LOCKING:
7231 * Inherited from calling layer (may sleep).
7232 *
7233 * RETURNS:
7234 * 0 on success, -errno otherwise.
7235 */
7236int ata_host_activate(struct ata_host *host, int irq,
7237 irq_handler_t irq_handler, unsigned long irq_flags,
7238 struct scsi_host_template *sht)
7239{
cbcdd875 7240 int i, rc;
f5cda257
TH
7241
7242 rc = ata_host_start(host);
7243 if (rc)
7244 return rc;
7245
3d46b2e2
PM
7246 /* Special case for polling mode */
7247 if (!irq) {
7248 WARN_ON(irq_handler);
7249 return ata_host_register(host, sht);
7250 }
7251
f5cda257
TH
7252 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7253 dev_driver_string(host->dev), host);
7254 if (rc)
7255 return rc;
7256
cbcdd875
TH
7257 for (i = 0; i < host->n_ports; i++)
7258 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7259
f5cda257
TH
7260 rc = ata_host_register(host, sht);
7261 /* if failed, just free the IRQ and leave ports alone */
7262 if (rc)
7263 devm_free_irq(host->dev, irq, host);
7264
7265 return rc;
7266}
7267
720ba126
TH
7268/**
7269 * ata_port_detach - Detach ATA port in prepration of device removal
7270 * @ap: ATA port to be detached
7271 *
7272 * Detach all ATA devices and the associated SCSI devices of @ap;
7273 * then, remove the associated SCSI host. @ap is guaranteed to
7274 * be quiescent on return from this function.
7275 *
7276 * LOCKING:
7277 * Kernel thread context (may sleep).
7278 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* old-style ports have no EH to quiesce - go straight to
	 * SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* hotplug work may re-arm itself; cancel after EH has settled */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7318
0529c159
TH
7319/**
7320 * ata_host_detach - Detach all ports of an ATA host
7321 * @host: Host to detach
7322 *
7323 * Detach all ports of @host.
7324 *
7325 * LOCKING:
7326 * Kernel thread context (may sleep).
7327 */
7328void ata_host_detach(struct ata_host *host)
7329{
7330 int i;
7331
7332 for (i = 0; i < host->n_ports; i++)
7333 ata_port_detach(host->ports[i]);
562f0c2d
TH
7334
7335 /* the host is dead now, dissociate ACPI */
7336 ata_acpi_dissociate(host);
0529c159
TH
7337}
7338
1da177e4
LT
7339/**
7340 * ata_std_ports - initialize ioaddr with standard port offsets.
7341 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7342 *
7343 * Utility function which initializes data_addr, error_addr,
7344 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7345 * device_addr, status_addr, and command_addr to standard offsets
7346 * relative to cmd_addr.
7347 *
7348 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7349 */
0baab86b 7350
1da177e4
LT
7351void ata_std_ports(struct ata_ioports *ioaddr)
7352{
7353 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7354 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7355 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7356 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7357 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7358 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7359 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7360 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7361 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7362 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7363}
7364
0baab86b 7365
374b1873
JG
7366#ifdef CONFIG_PCI
7367
1da177e4
LT
7368/**
7369 * ata_pci_remove_one - PCI layer callback for device removal
7370 * @pdev: PCI device that was removed
7371 *
b878ca5d
TH
7372 * PCI layer indicates to libata via this hook that hot-unplug or
7373 * module unload event has occurred. Detach all ports. Resource
7374 * release is handled via devres.
1da177e4
LT
7375 *
7376 * LOCKING:
7377 * Inherited from PCI layer (may sleep).
7378 */
f0d36efd 7379void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7380{
2855568b 7381 struct device *dev = &pdev->dev;
cca3974e 7382 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7383
b878ca5d 7384 ata_host_detach(host);
1da177e4
LT
7385}
7386
7387/* move to PCI subsystem */
057ace5e 7388int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7389{
7390 unsigned long tmp = 0;
7391
7392 switch (bits->width) {
7393 case 1: {
7394 u8 tmp8 = 0;
7395 pci_read_config_byte(pdev, bits->reg, &tmp8);
7396 tmp = tmp8;
7397 break;
7398 }
7399 case 2: {
7400 u16 tmp16 = 0;
7401 pci_read_config_word(pdev, bits->reg, &tmp16);
7402 tmp = tmp16;
7403 break;
7404 }
7405 case 4: {
7406 u32 tmp32 = 0;
7407 pci_read_config_dword(pdev, bits->reg, &tmp32);
7408 tmp = tmp32;
7409 break;
7410 }
7411
7412 default:
7413 return -EINVAL;
7414 }
7415
7416 tmp &= bits->mask;
7417
7418 return (tmp == bits->val) ? 1 : 0;
7419}
9b847548 7420
6ffa01d8 7421#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	/* save config space before the device is disabled */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only drop to D3hot for actual sleep transitions; for other
	 * PM events the device is left in its current power state */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
7430
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	/* restore power state and config space saved at suspend */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* managed enable - devres pairs this with an automatic disable */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	/* re-enable bus mastering for DMA */
	pci_set_master(pdev);
	return 0;
}
7448
3c5100c1 7449int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7450{
cca3974e 7451 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7452 int rc = 0;
7453
cca3974e 7454 rc = ata_host_suspend(host, mesg);
500530f6
TH
7455 if (rc)
7456 return rc;
7457
3c5100c1 7458 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7459
7460 return 0;
7461}
7462
7463int ata_pci_device_resume(struct pci_dev *pdev)
7464{
cca3974e 7465 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7466 int rc;
500530f6 7467
553c4aa6
TH
7468 rc = ata_pci_device_do_resume(pdev);
7469 if (rc == 0)
7470 ata_host_resume(host);
7471 return rc;
9b847548 7472}
6ffa01d8
TH
7473#endif /* CONFIG_PM */
7474
1da177e4
LT
7475#endif /* CONFIG_PCI */
7476
33267325
TH
7477static int __init ata_parse_force_one(char **cur,
7478 struct ata_force_ent *force_ent,
7479 const char **reason)
7480{
7481 /* FIXME: Currently, there's no way to tag init const data and
7482 * using __initdata causes build failure on some versions of
7483 * gcc. Once __initdataconst is implemented, add const to the
7484 * following structure.
7485 */
7486 static struct ata_force_param force_tbl[] __initdata = {
7487 { "40c", .cbl = ATA_CBL_PATA40 },
7488 { "80c", .cbl = ATA_CBL_PATA80 },
7489 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7490 { "unk", .cbl = ATA_CBL_PATA_UNK },
7491 { "ign", .cbl = ATA_CBL_PATA_IGN },
7492 { "sata", .cbl = ATA_CBL_SATA },
7493 { "1.5Gbps", .spd_limit = 1 },
7494 { "3.0Gbps", .spd_limit = 2 },
7495 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7496 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7497 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7498 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7499 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7500 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7501 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7502 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7503 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7504 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7505 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7506 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7507 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7508 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7509 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7510 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7511 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7512 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7513 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7514 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7515 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7516 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7517 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7518 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7519 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7520 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7521 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7522 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7523 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7524 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7525 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7526 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7527 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7528 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7529 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7530 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7531 };
7532 char *start = *cur, *p = *cur;
7533 char *id, *val, *endp;
7534 const struct ata_force_param *match_fp = NULL;
7535 int nr_matches = 0, i;
7536
7537 /* find where this param ends and update *cur */
7538 while (*p != '\0' && *p != ',')
7539 p++;
7540
7541 if (*p == '\0')
7542 *cur = p;
7543 else
7544 *cur = p + 1;
7545
7546 *p = '\0';
7547
7548 /* parse */
7549 p = strchr(start, ':');
7550 if (!p) {
7551 val = strstrip(start);
7552 goto parse_val;
7553 }
7554 *p = '\0';
7555
7556 id = strstrip(start);
7557 val = strstrip(p + 1);
7558
7559 /* parse id */
7560 p = strchr(id, '.');
7561 if (p) {
7562 *p++ = '\0';
7563 force_ent->device = simple_strtoul(p, &endp, 10);
7564 if (p == endp || *endp != '\0') {
7565 *reason = "invalid device";
7566 return -EINVAL;
7567 }
7568 }
7569
7570 force_ent->port = simple_strtoul(id, &endp, 10);
7571 if (p == endp || *endp != '\0') {
7572 *reason = "invalid port/link";
7573 return -EINVAL;
7574 }
7575
7576 parse_val:
7577 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7578 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7579 const struct ata_force_param *fp = &force_tbl[i];
7580
7581 if (strncasecmp(val, fp->name, strlen(val)))
7582 continue;
7583
7584 nr_matches++;
7585 match_fp = fp;
7586
7587 if (strcasecmp(val, fp->name) == 0) {
7588 nr_matches = 1;
7589 break;
7590 }
7591 }
7592
7593 if (!nr_matches) {
7594 *reason = "unknown value";
7595 return -EINVAL;
7596 }
7597 if (nr_matches > 1) {
7598 *reason = "ambigious value";
7599 return -EINVAL;
7600 }
7601
7602 force_ent->param = *match_fp;
7603
7604 return 0;
7605}
7606
7607static void __init ata_parse_force_param(void)
7608{
7609 int idx = 0, size = 1;
7610 int last_port = -1, last_device = -1;
7611 char *p, *cur, *next;
7612
7613 /* calculate maximum number of params and allocate force_tbl */
7614 for (p = ata_force_param_buf; *p; p++)
7615 if (*p == ',')
7616 size++;
7617
7618 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7619 if (!ata_force_tbl) {
7620 printk(KERN_WARNING "ata: failed to extend force table, "
7621 "libata.force ignored\n");
7622 return;
7623 }
7624
7625 /* parse and populate the table */
7626 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7627 const char *reason = "";
7628 struct ata_force_ent te = { .port = -1, .device = -1 };
7629
7630 next = cur;
7631 if (ata_parse_force_one(&next, &te, &reason)) {
7632 printk(KERN_WARNING "ata: failed to parse force "
7633 "parameter \"%s\" (%s)\n",
7634 cur, reason);
7635 continue;
7636 }
7637
7638 if (te.port == -1) {
7639 te.port = last_port;
7640 te.device = last_device;
7641 }
7642
7643 ata_force_tbl[idx++] = te;
7644
7645 last_port = te.port;
7646 last_device = te.device;
7647 }
7648
7649 ata_force_tbl_size = idx;
7650}
1da177e4 7651
1da177e4
LT
7652static int __init ata_init(void)
7653{
a8601e5f 7654 ata_probe_timeout *= HZ;
33267325
TH
7655
7656 ata_parse_force_param();
7657
1da177e4
LT
7658 ata_wq = create_workqueue("ata");
7659 if (!ata_wq)
7660 return -ENOMEM;
7661
453b07ac
TH
7662 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7663 if (!ata_aux_wq) {
7664 destroy_workqueue(ata_wq);
7665 return -ENOMEM;
7666 }
7667
1da177e4
LT
7668 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7669 return 0;
7670}
7671
static void __exit ata_exit(void)
{
	/* free the libata.force parse table and tear down both
	 * workqueues created in ata_init() */
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

/* subsys_initcall: run before ordinary driver initcalls so LLDs can
 * rely on libata being initialized */
subsys_initcall(ata_init);
module_exit(ata_exit);
7681
67846b30 7682static unsigned long ratelimit_time;
34af946a 7683static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7684
7685int ata_ratelimit(void)
7686{
7687 int rc;
7688 unsigned long flags;
7689
7690 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7691
7692 if (time_after(jiffies, ratelimit_time)) {
7693 rc = 1;
7694 ratelimit_time = jiffies + (HZ/5);
7695 } else
7696 rc = 0;
7697
7698 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7699
7700 return rc;
7701}
7702
c22daff4
TH
7703/**
7704 * ata_wait_register - wait until register value changes
7705 * @reg: IO-mapped register
7706 * @mask: Mask to apply to read register value
7707 * @val: Wait condition
7708 * @interval_msec: polling interval in milliseconds
7709 * @timeout_msec: timeout in milliseconds
7710 *
7711 * Waiting for some bits of register to change is a common
7712 * operation for ATA controllers. This function reads 32bit LE
7713 * IO-mapped register @reg and tests for the following condition.
7714 *
7715 * (*@reg & mask) != val
7716 *
7717 * If the condition is met, it returns; otherwise, the process is
7718 * repeated after @interval_msec until timeout.
7719 *
7720 * LOCKING:
7721 * Kernel thread context (may sleep)
7722 *
7723 * RETURNS:
7724 * The final register value.
7725 */
7726u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7727 unsigned long interval_msec,
7728 unsigned long timeout_msec)
7729{
7730 unsigned long timeout;
7731 u32 tmp;
7732
7733 tmp = ioread32(reg);
7734
7735 /* Calculate timeout _after_ the first read to make sure
7736 * preceding writes reach the controller before starting to
7737 * eat away the timeout.
7738 */
7739 timeout = jiffies + (timeout_msec * HZ) / 1000;
7740
7741 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7742 msleep(interval_msec);
7743 tmp = ioread32(reg);
7744 }
7745
7746 return tmp;
7747}
7748
dd5b06c4
TH
/*
 * Dummy port_ops
 *
 * Installed on ports that exist in a host's port array but have no
 * usable hardware behind them (ata_port_is_dummy() ports).  All hooks
 * are benign no-ops; command issue always fails with AC_ERR_SYSTEM.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always claim the (nonexistent) device is ready */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* nothing can ever be issued to a dummy port */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* port_info wrapper so LLDs can hand dummy ports to
 * ata_host_alloc_pinfo() */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7784
1da177e4
LT
7785/*
7786 * libata is essentially a library of internal helper functions for
7787 * low-level ATA host controller drivers. As such, the API/ABI is
7788 * likely to change as new drivers are added and updated.
7789 * Do not depend on ABI/API stability.
7790 */
e9c83914
TH
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
/* power-management entry points only exist with CONFIG_PM */
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

/* PCI helpers are only built for CONFIG_PCI kernels */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 1.084666 seconds and 5 git commands to generate.