libata: reorder functions in libata-sff.c
[deliverable/linux.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
d7bb4cc7 72/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 76
029cfd6b
TH
/*
 * Base port operations inherited by every libata driver: standard
 * reset / error-handling hooks plus a no-op IRQ clear.  The more
 * specific operation tables below chain to this one via .inherits.
 */
const struct ata_port_operations ata_base_port_ops = {
	.irq_clear		= ata_noop_irq_clear,
	.prereset		= ata_std_prereset,
	.hardreset		= sata_std_hardreset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

/* generic SATA ports: adds standard qc deferral, device select is a no-op */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.dev_select		= ata_noop_dev_select,
};

/* SATA behind a Port Multiplier: PMP-aware reset and error handling */
const struct ata_port_operations sata_pmp_port_ops = {
	.inherits		= &sata_port_ops,

	.pmp_prereset		= sata_pmp_std_prereset,
	.pmp_hardreset		= sata_pmp_std_hardreset,
	.pmp_postreset		= sata_pmp_std_postreset,
	.error_handler		= sata_pmp_error_handler,
};

/*
 * Classic SFF (taskfile / shadow register) controllers: PIO data
 * transfer plus taskfile load/read and IRQ helpers.
 */
const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.softreset		= ata_std_softreset,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.dev_select		= ata_std_dev_select,
	.check_status		= ata_check_status,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.data_xfer		= ata_data_xfer,
	.irq_on			= ata_irq_on,

	.port_start		= ata_sff_port_start,
};

/* SFF with a bus-master DMA engine (the common PCI IDE case) */
const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.mode_filter		= ata_pci_default_filter,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.irq_clear		= ata_bmdma_irq_clear,
};
135
3373efd8
TH
/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* NOTE(review): appears to be the next id used when printing/registering
 * ports — assignment happens outside this chunk, confirm before relying */
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

/*
 * libata.force parameter support: an ata_force_param holds the
 * settings to force (any subset may be active), an ata_force_ent
 * binds one param to a port/device selector (-1 == wildcard).
 */
struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

/* parsed libata.force table; consumed by the ata_force_*() helpers below */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

/* module parameters: semantics are given by the MODULE_PARM_DESC strings */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
212
0baab86b 213
33267325
TH
214/**
215 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 216 * @ap: ATA port of interest
33267325
TH
217 *
218 * Force cable type according to libata.force and whine about it.
219 * The last entry which has matching port number is used, so it
220 * can be specified as part of device force parameters. For
221 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
222 * same effect.
223 *
224 * LOCKING:
225 * EH context.
226 */
227void ata_force_cbl(struct ata_port *ap)
228{
229 int i;
230
231 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
232 const struct ata_force_ent *fe = &ata_force_tbl[i];
233
234 if (fe->port != -1 && fe->port != ap->print_id)
235 continue;
236
237 if (fe->param.cbl == ATA_CBL_NONE)
238 continue;
239
240 ap->cbl = fe->param.cbl;
241 ata_port_printk(ap, KERN_NOTICE,
242 "FORCE: cable set to %s\n", fe->param.name);
243 return;
244 }
245}
246
247/**
248 * ata_force_spd_limit - force SATA spd limit according to libata.force
249 * @link: ATA link of interest
250 *
251 * Force SATA spd limit according to libata.force and whine about
252 * it. When only the port part is specified (e.g. 1:), the limit
253 * applies to all links connected to both the host link and all
254 * fan-out ports connected via PMP. If the device part is
255 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
256 * link not the host link. Device number 15 always points to the
257 * host link whether PMP is attached or not.
258 *
259 * LOCKING:
260 * EH context.
261 */
262static void ata_force_spd_limit(struct ata_link *link)
263{
264 int linkno, i;
265
266 if (ata_is_host_link(link))
267 linkno = 15;
268 else
269 linkno = link->pmp;
270
271 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
272 const struct ata_force_ent *fe = &ata_force_tbl[i];
273
274 if (fe->port != -1 && fe->port != link->ap->print_id)
275 continue;
276
277 if (fe->device != -1 && fe->device != linkno)
278 continue;
279
280 if (!fe->param.spd_limit)
281 continue;
282
283 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
284 ata_link_printk(link, KERN_NOTICE,
285 "FORCE: PHY spd limit set to %s\n", fe->param.name);
286 return;
287 }
288}
289
290/**
291 * ata_force_xfermask - force xfermask according to libata.force
292 * @dev: ATA device of interest
293 *
294 * Force xfer_mask according to libata.force and whine about it.
295 * For consistency with link selection, device number 15 selects
296 * the first device connected to the host link.
297 *
298 * LOCKING:
299 * EH context.
300 */
301static void ata_force_xfermask(struct ata_device *dev)
302{
303 int devno = dev->link->pmp + dev->devno;
304 int alt_devno = devno;
305 int i;
306
307 /* allow n.15 for the first device attached to host port */
308 if (ata_is_host_link(dev->link) && devno == 0)
309 alt_devno = 15;
310
311 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
312 const struct ata_force_ent *fe = &ata_force_tbl[i];
313 unsigned long pio_mask, mwdma_mask, udma_mask;
314
315 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
316 continue;
317
318 if (fe->device != -1 && fe->device != devno &&
319 fe->device != alt_devno)
320 continue;
321
322 if (!fe->param.xfer_mask)
323 continue;
324
325 ata_unpack_xfermask(fe->param.xfer_mask,
326 &pio_mask, &mwdma_mask, &udma_mask);
327 if (udma_mask)
328 dev->udma_mask = udma_mask;
329 else if (mwdma_mask) {
330 dev->udma_mask = 0;
331 dev->mwdma_mask = mwdma_mask;
332 } else {
333 dev->udma_mask = 0;
334 dev->mwdma_mask = 0;
335 dev->pio_mask = pio_mask;
336 }
337
338 ata_dev_printk(dev, KERN_NOTICE,
339 "FORCE: xfer_mask set to %s\n", fe->param.name);
340 return;
341 }
342}
343
344/**
345 * ata_force_horkage - force horkage according to libata.force
346 * @dev: ATA device of interest
347 *
348 * Force horkage according to libata.force and whine about it.
349 * For consistency with link selection, device number 15 selects
350 * the first device connected to the host link.
351 *
352 * LOCKING:
353 * EH context.
354 */
355static void ata_force_horkage(struct ata_device *dev)
356{
357 int devno = dev->link->pmp + dev->devno;
358 int alt_devno = devno;
359 int i;
360
361 /* allow n.15 for the first device attached to host port */
362 if (ata_is_host_link(dev->link) && devno == 0)
363 alt_devno = 15;
364
365 for (i = 0; i < ata_force_tbl_size; i++) {
366 const struct ata_force_ent *fe = &ata_force_tbl[i];
367
368 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
369 continue;
370
371 if (fe->device != -1 && fe->device != devno &&
372 fe->device != alt_devno)
373 continue;
374
375 if (!(~dev->horkage & fe->param.horkage_on) &&
376 !(dev->horkage & fe->param.horkage_off))
377 continue;
378
379 dev->horkage |= fe->param.horkage_on;
380 dev->horkage &= ~fe->param.horkage_off;
381
382 ata_dev_printk(dev, KERN_NOTICE,
383 "FORCE: horkage modified (%s)\n", fe->param.name);
384 }
385}
386
436d34b3
TH
387/**
388 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
389 * @opcode: SCSI opcode
390 *
391 * Determine ATAPI command type from @opcode.
392 *
393 * LOCKING:
394 * None.
395 *
396 * RETURNS:
397 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
398 */
399int atapi_cmd_type(u8 opcode)
400{
401 switch (opcode) {
402 case GPCMD_READ_10:
403 case GPCMD_READ_12:
404 return ATAPI_READ;
405
406 case GPCMD_WRITE_10:
407 case GPCMD_WRITE_12:
408 case GPCMD_WRITE_AND_VERIFY_10:
409 return ATAPI_WRITE;
410
411 case GPCMD_READ_CD:
412 case GPCMD_READ_CD_MSF:
413 return ATAPI_READ_CD;
414
e52dcc48
TH
415 case ATA_16:
416 case ATA_12:
417 if (atapi_passthru16)
418 return ATAPI_PASS_THRU;
419 /* fall thru */
436d34b3
TH
420 default:
421 return ATAPI_MISC;
422 }
423}
424
1da177e4
LT
425/**
426 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
427 * @tf: Taskfile to convert
1da177e4 428 * @pmp: Port multiplier port
9977126c
TH
429 * @is_cmd: This FIS is for command
430 * @fis: Buffer into which data will output
1da177e4
LT
431 *
432 * Converts a standard ATA taskfile to a Serial ATA
433 * FIS structure (Register - Host to Device).
434 *
435 * LOCKING:
436 * Inherited from caller.
437 */
9977126c 438void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 439{
9977126c
TH
440 fis[0] = 0x27; /* Register - Host to Device FIS */
441 fis[1] = pmp & 0xf; /* Port multiplier number*/
442 if (is_cmd)
443 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
444
1da177e4
LT
445 fis[2] = tf->command;
446 fis[3] = tf->feature;
447
448 fis[4] = tf->lbal;
449 fis[5] = tf->lbam;
450 fis[6] = tf->lbah;
451 fis[7] = tf->device;
452
453 fis[8] = tf->hob_lbal;
454 fis[9] = tf->hob_lbam;
455 fis[10] = tf->hob_lbah;
456 fis[11] = tf->hob_feature;
457
458 fis[12] = tf->nsect;
459 fis[13] = tf->hob_nsect;
460 fis[14] = 0;
461 fis[15] = tf->ctl;
462
463 fis[16] = 0;
464 fis[17] = 0;
465 fis[18] = 0;
466 fis[19] = 0;
467}
468
469/**
470 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
471 * @fis: Buffer from which data will be input
472 * @tf: Taskfile to output
473 *
e12a1be6 474 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
475 *
476 * LOCKING:
477 * Inherited from caller.
478 */
479
057ace5e 480void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
481{
482 tf->command = fis[2]; /* status */
483 tf->feature = fis[3]; /* error */
484
485 tf->lbal = fis[4];
486 tf->lbam = fis[5];
487 tf->lbah = fis[6];
488 tf->device = fis[7];
489
490 tf->hob_lbal = fis[8];
491 tf->hob_lbam = fis[9];
492 tf->hob_lbah = fis[10];
493
494 tf->nsect = fis[12];
495 tf->hob_nsect = fis[13];
496}
497
8cbd6df1
AL
/*
 * Command lookup table for ata_rwcmd_protocol().  Indexed by
 * base + fua + lba48 + write, where base is 0 (PIO multi-sector),
 * 8 (PIO single-sector) or 16 (DMA); fua adds 4 when ATA_TFLAG_FUA
 * is set, lba48 adds 2 for LBA48 and write adds 1.  A zero entry
 * marks an unsupported combination (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
527
528/**
8cbd6df1 529 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
530 * @tf: command to examine and configure
531 * @dev: device tf belongs to
1da177e4 532 *
2e9edbf8 533 * Examine the device configuration and tf->flags to calculate
8cbd6df1 534 * the proper read/write commands and protocol to use.
1da177e4
LT
535 *
536 * LOCKING:
537 * caller.
538 */
bd056d7e 539static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 540{
9a3dccc4 541 u8 cmd;
1da177e4 542
9a3dccc4 543 int index, fua, lba48, write;
2e9edbf8 544
9a3dccc4 545 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
546 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
547 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 548
8cbd6df1
AL
549 if (dev->flags & ATA_DFLAG_PIO) {
550 tf->protocol = ATA_PROT_PIO;
9a3dccc4 551 index = dev->multi_count ? 0 : 8;
9af5c9c9 552 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
553 /* Unable to use DMA due to host limitation */
554 tf->protocol = ATA_PROT_PIO;
0565c26d 555 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
556 } else {
557 tf->protocol = ATA_PROT_DMA;
9a3dccc4 558 index = 16;
8cbd6df1 559 }
1da177e4 560
9a3dccc4
TH
561 cmd = ata_rw_cmds[index + fua + lba48 + write];
562 if (cmd) {
563 tf->command = cmd;
564 return 0;
565 }
566 return -1;
1da177e4
LT
567}
568
35b649fe
TH
569/**
570 * ata_tf_read_block - Read block address from ATA taskfile
571 * @tf: ATA taskfile of interest
572 * @dev: ATA device @tf belongs to
573 *
574 * LOCKING:
575 * None.
576 *
577 * Read block address from @tf. This function can handle all
578 * three address formats - LBA, LBA48 and CHS. tf->protocol and
579 * flags select the address format to use.
580 *
581 * RETURNS:
582 * Block address read from @tf.
583 */
584u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
585{
586 u64 block = 0;
587
588 if (tf->flags & ATA_TFLAG_LBA) {
589 if (tf->flags & ATA_TFLAG_LBA48) {
590 block |= (u64)tf->hob_lbah << 40;
591 block |= (u64)tf->hob_lbam << 32;
592 block |= tf->hob_lbal << 24;
593 } else
594 block |= (tf->device & 0xf) << 24;
595
596 block |= tf->lbah << 16;
597 block |= tf->lbam << 8;
598 block |= tf->lbal;
599 } else {
600 u32 cyl, head, sect;
601
602 cyl = tf->lbam | (tf->lbah << 8);
603 head = tf->device & 0xf;
604 sect = tf->lbal;
605
606 block = (cyl * dev->heads + head) * dev->sectors + sect;
607 }
608
609 return block;
610}
611
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Picks NCQ,
 *	LBA28, LBA48 or CHS addressing in that order of preference,
 *	validating that the request fits the chosen format.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands never use NCQ even when it is enabled */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag is stored shifted left by 3 in the sector count
		 * register; the transfer length goes in the FEATURE pair */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;	/* FUA requested */
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
734
cb95d562
TH
735/**
736 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
737 * @pio_mask: pio_mask
738 * @mwdma_mask: mwdma_mask
739 * @udma_mask: udma_mask
740 *
741 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
742 * unsigned int xfer_mask.
743 *
744 * LOCKING:
745 * None.
746 *
747 * RETURNS:
748 * Packed xfer_mask.
749 */
7dc951ae
TH
750unsigned long ata_pack_xfermask(unsigned long pio_mask,
751 unsigned long mwdma_mask,
752 unsigned long udma_mask)
cb95d562
TH
753{
754 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
755 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
756 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
757}
758
c0489e4e
TH
759/**
760 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
761 * @xfer_mask: xfer_mask to unpack
762 * @pio_mask: resulting pio_mask
763 * @mwdma_mask: resulting mwdma_mask
764 * @udma_mask: resulting udma_mask
765 *
766 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
767 * Any NULL distination masks will be ignored.
768 */
7dc951ae
TH
769void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
770 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
771{
772 if (pio_mask)
773 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
774 if (mwdma_mask)
775 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
776 if (udma_mask)
777 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
778}
779
/*
 * Maps each transfer-mode class (PIO/MWDMA/UDMA) to its bit range in
 * a packed xfer_mask (shift + number of bits) and the first XFER_*
 * mode number of that class.  Terminated by a negative shift.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
789
790/**
791 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
792 * @xfer_mask: xfer_mask of interest
793 *
794 * Return matching XFER_* value for @xfer_mask. Only the highest
795 * bit of @xfer_mask is considered.
796 *
797 * LOCKING:
798 * None.
799 *
800 * RETURNS:
70cd071e 801 * Matching XFER_* value, 0xff if no match found.
cb95d562 802 */
7dc951ae 803u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
804{
805 int highbit = fls(xfer_mask) - 1;
806 const struct ata_xfer_ent *ent;
807
808 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
809 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
810 return ent->base + highbit - ent->shift;
70cd071e 811 return 0xff;
cb95d562
TH
812}
813
814/**
815 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
816 * @xfer_mode: XFER_* of interest
817 *
818 * Return matching xfer_mask for @xfer_mode.
819 *
820 * LOCKING:
821 * None.
822 *
823 * RETURNS:
824 * Matching xfer_mask, 0 if no match found.
825 */
7dc951ae 826unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
827{
828 const struct ata_xfer_ent *ent;
829
830 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
831 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
832 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
833 & ~((1 << ent->shift) - 1);
cb95d562
TH
834 return 0;
835}
836
837/**
838 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
839 * @xfer_mode: XFER_* of interest
840 *
841 * Return matching xfer_shift for @xfer_mode.
842 *
843 * LOCKING:
844 * None.
845 *
846 * RETURNS:
847 * Matching xfer_shift, -1 if no match found.
848 */
7dc951ae 849int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
850{
851 const struct ata_xfer_ent *ent;
852
853 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
854 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
855 return ent->shift;
856 return -1;
857}
858
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Return the name of the fastest transfer mode present in
 *	@xfer_mask (i.e. its highest set bit).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string naming the highest speed in @xfer_mask, or
 *	the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const names[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	if (bit < 0 || bit >= ARRAY_SIZE(names))
		return "<n/a>";

	return names[bit];
}
904
4c360c81
TH
/* Name for a SATA link speed value; spd counts from 1 (1.5 Gbps). */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd >= 1 && spd <= ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];

	return "<unknown>";
}
916
3373efd8 917void ata_dev_disable(struct ata_device *dev)
0b8efb0a 918{
09d7f9b0 919 if (ata_dev_enabled(dev)) {
9af5c9c9 920 if (ata_msg_drv(dev->link->ap))
09d7f9b0 921 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 922 ata_acpi_on_disable(dev);
4ae72a1e
TH
923 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
924 ATA_DNXFER_QUIET);
0b8efb0a
TH
925 dev->class++;
926 }
927}
928
ca77329f
KCA
/*
 * ata_dev_set_dipm - configure Device Initiated Power Management
 * @dev: target device
 * @policy: link power management policy to apply
 *
 * Adjust the SControl IPM bits for @policy and, for MIN_POWER on a
 * DIPM-capable device, enable DIPM via SET FEATURES.  Returns 0 on
 * success, -EINVAL when the driver does not advertise IPM support,
 * or the error from sata_scr_read/write.
 */
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* bits 9:8 of SControl are the IPM transition restrictions */
	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	/* NOTE(review): err_mask is assigned only on the MIN_POWER/DIPM
	 * path above; this cast just silences the unused warning. */
	(void) err_mask;

	return 0;
}
1012
1013/**
1014 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
1015 * @dev: device to enable power management
1016 * @policy: the link power management policy
ca77329f
KCA
1017 *
1018 * Enable SATA Interface power management. This will enable
1019 * Device Interface Power Management (DIPM) for min_power
1020 * policy, and then call driver specific callbacks for
1021 * enabling Host Initiated Power management.
1022 *
1023 * Locking: Caller.
1024 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
1025 */
1026void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1027{
1028 int rc = 0;
1029 struct ata_port *ap = dev->link->ap;
1030
1031 /* set HIPM first, then DIPM */
1032 if (ap->ops->enable_pm)
1033 rc = ap->ops->enable_pm(ap, policy);
1034 if (rc)
1035 goto enable_pm_out;
1036 rc = ata_dev_set_dipm(dev, policy);
1037
1038enable_pm_out:
1039 if (rc)
1040 ap->pm_policy = MAX_PERFORMANCE;
1041 else
1042 ap->pm_policy = policy;
1043 return /* rc */; /* hopefully we can use 'rc' eventually */
1044}
1045
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management: turn off Device
 *	Interface Power Management (DIPM) without changing the stored
 *	policy, then invoke the driver callback for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* device side first, then the host controller */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1068
1069void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1070{
1071 ap->pm_policy = policy;
3ec25ebd 1072 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1073 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1074 ata_port_schedule_eh(ap);
1075}
1076
1992a5ed 1077#ifdef CONFIG_PM
ca77329f
KCA
1078static void ata_lpm_enable(struct ata_host *host)
1079{
1080 struct ata_link *link;
1081 struct ata_port *ap;
1082 struct ata_device *dev;
1083 int i;
1084
1085 for (i = 0; i < host->n_ports; i++) {
1086 ap = host->ports[i];
1087 ata_port_for_each_link(link, ap) {
1088 ata_link_for_each_dev(dev, link)
1089 ata_dev_disable_pm(dev);
1090 }
1091 }
1092}
1093
1094static void ata_lpm_disable(struct ata_host *host)
1095{
1096 int i;
1097
1098 for (i = 0; i < host->n_ports; i++) {
1099 struct ata_port *ap = host->ports[i];
1100 ata_lpm_schedule(ap, ap->pm_policy);
1101 }
1102}
1992a5ed 1103#endif /* CONFIG_PM */
ca77329f
KCA
1104
1105
1da177e4 1106/**
0d5ff566 1107 * ata_devchk - PATA device presence detection
1da177e4
LT
1108 * @ap: ATA channel to examine
1109 * @device: Device to examine (starting at zero)
1110 *
1111 * This technique was originally described in
1112 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1113 * later found its way into the ATA/ATAPI spec.
1114 *
1115 * Write a pattern to the ATA shadow registers,
1116 * and if a device is present, it will respond by
1117 * correctly storing and echoing back the
1118 * ATA shadow register contents.
1119 *
1120 * LOCKING:
1121 * caller.
1122 */
1123
0d5ff566 1124static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
1125{
1126 struct ata_ioports *ioaddr = &ap->ioaddr;
1127 u8 nsect, lbal;
1128
1129 ap->ops->dev_select(ap, device);
1130
0d5ff566
TH
1131 iowrite8(0x55, ioaddr->nsect_addr);
1132 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1133
0d5ff566
TH
1134 iowrite8(0xaa, ioaddr->nsect_addr);
1135 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 1136
0d5ff566
TH
1137 iowrite8(0x55, ioaddr->nsect_addr);
1138 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1139
0d5ff566
TH
1140 nsect = ioread8(ioaddr->nsect_addr);
1141 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
1142
1143 if ((nsect == 0x55) && (lbal == 0xaa))
1144 return 1; /* we found a device */
1145
1146 return 0; /* nothing found */
1147}
1148
1da177e4
LT
1149/**
1150 * ata_dev_classify - determine device type based on ATA-spec signature
1151 * @tf: ATA taskfile register set for device to be identified
1152 *
1153 * Determine from taskfile register contents whether a device is
1154 * ATA or ATAPI, as per "Signature and persistence" section
1155 * of ATA/PI spec (volume 1, sect 5.14).
1156 *
1157 * LOCKING:
1158 * None.
1159 *
1160 * RETURNS:
633273a3
TH
1161 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1162 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1163 */
057ace5e 1164unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1165{
1166 /* Apple's open source Darwin code hints that some devices only
1167 * put a proper signature into the LBA mid/high registers,
1168 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1169 *
1170 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1171 * signatures for ATA and ATAPI devices attached on SerialATA,
1172 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1173 * spec has never mentioned about using different signatures
1174 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1175 * Multiplier specification began to use 0x69/0x96 to identify
1176 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1177 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1178 * 0x69/0x96 shortly and described them as reserved for
1179 * SerialATA.
1180 *
1181 * We follow the current spec and consider that 0x69/0x96
1182 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1183 */
633273a3 1184 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1185 DPRINTK("found ATA device by sig\n");
1186 return ATA_DEV_ATA;
1187 }
1188
633273a3 1189 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1190 DPRINTK("found ATAPI device by sig\n");
1191 return ATA_DEV_ATAPI;
1192 }
1193
633273a3
TH
1194 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1195 DPRINTK("found PMP device by sig\n");
1196 return ATA_DEV_PMP;
1197 }
1198
1199 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1200 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1201 return ATA_DEV_SEMB_UNSUP; /* not yet */
1202 }
1203
1da177e4
LT
1204 DPRINTK("unknown device\n");
1205 return ATA_DEV_UNKNOWN;
1206}
1207
1208/**
1209 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
1210 * @dev: ATA device to classify (starting at zero)
1211 * @present: device seems present
b4dc7623 1212 * @r_err: Value of error register on completion
1da177e4
LT
1213 *
1214 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1215 * an ATA/ATAPI-defined set of values is placed in the ATA
1216 * shadow registers, indicating the results of device detection
1217 * and diagnostics.
1218 *
1219 * Select the ATA device, and read the values from the ATA shadow
1220 * registers. Then parse according to the Error register value,
1221 * and the spec-defined values examined by ata_dev_classify().
1222 *
1223 * LOCKING:
1224 * caller.
b4dc7623
TH
1225 *
1226 * RETURNS:
1227 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 1228 */
3f19859e
TH
1229unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
1230 u8 *r_err)
1da177e4 1231{
3f19859e 1232 struct ata_port *ap = dev->link->ap;
1da177e4
LT
1233 struct ata_taskfile tf;
1234 unsigned int class;
1235 u8 err;
1236
3f19859e 1237 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
1238
1239 memset(&tf, 0, sizeof(tf));
1240
1da177e4 1241 ap->ops->tf_read(ap, &tf);
0169e284 1242 err = tf.feature;
b4dc7623
TH
1243 if (r_err)
1244 *r_err = err;
1da177e4 1245
c5038fc0
AC
1246 /* see if device passed diags: continue and warn later */
1247 if (err == 0)
93590859 1248 /* diagnostic fail : do nothing _YET_ */
3f19859e 1249 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 1250 else if (err == 1)
1da177e4 1251 /* do nothing */ ;
3f19859e 1252 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
1253 /* do nothing */ ;
1254 else
b4dc7623 1255 return ATA_DEV_NONE;
1da177e4 1256
b4dc7623 1257 /* determine if device is ATA or ATAPI */
1da177e4 1258 class = ata_dev_classify(&tf);
b4dc7623 1259
d7fbee05
TH
1260 if (class == ATA_DEV_UNKNOWN) {
1261 /* If the device failed diagnostic, it's likely to
1262 * have reported incorrect device signature too.
1263 * Assume ATA device if the device seems present but
1264 * device signature is invalid with diagnostic
1265 * failure.
1266 */
1267 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1268 class = ATA_DEV_ATA;
1269 else
1270 class = ATA_DEV_NONE;
1271 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
1272 class = ATA_DEV_NONE;
1273
b4dc7623 1274 return class;
1da177e4
LT
1275}
1276
1277/**
6a62a04d 1278 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1279 * @id: IDENTIFY DEVICE results we will examine
1280 * @s: string into which data is output
1281 * @ofs: offset into identify device page
1282 * @len: length of string to return. must be an even number.
1283 *
1284 * The strings in the IDENTIFY DEVICE page are broken up into
1285 * 16-bit chunks. Run through the string, and output each
1286 * 8-bit chunk linearly, regardless of platform.
1287 *
1288 * LOCKING:
1289 * caller.
1290 */
1291
6a62a04d
TH
1292void ata_id_string(const u16 *id, unsigned char *s,
1293 unsigned int ofs, unsigned int len)
1da177e4
LT
1294{
1295 unsigned int c;
1296
1297 while (len > 0) {
1298 c = id[ofs] >> 8;
1299 *s = c;
1300 s++;
1301
1302 c = id[ofs] & 0xff;
1303 *s = c;
1304 s++;
1305
1306 ofs++;
1307 len -= 2;
1308 }
1309}
1310
0e949ff3 1311/**
6a62a04d 1312 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1313 * @id: IDENTIFY DEVICE results we will examine
1314 * @s: string into which data is output
1315 * @ofs: offset into identify device page
1316 * @len: length of string to return. must be an odd number.
1317 *
6a62a04d 1318 * This function is identical to ata_id_string except that it
0e949ff3
TH
1319 * trims trailing spaces and terminates the resulting string with
1320 * null. @len must be actual maximum length (even number) + 1.
1321 *
1322 * LOCKING:
1323 * caller.
1324 */
6a62a04d
TH
1325void ata_id_c_string(const u16 *id, unsigned char *s,
1326 unsigned int ofs, unsigned int len)
0e949ff3
TH
1327{
1328 unsigned char *p;
1329
1330 WARN_ON(!(len & 1));
1331
6a62a04d 1332 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1333
1334 p = s + strnlen(s, len - 1);
1335 while (p > s && p[-1] == ' ')
1336 p--;
1337 *p = '\0';
1338}
0baab86b 1339
db6f8759
TH
1340static u64 ata_id_n_sectors(const u16 *id)
1341{
1342 if (ata_id_has_lba(id)) {
1343 if (ata_id_has_lba48(id))
1344 return ata_id_u64(id, 100);
1345 else
1346 return ata_id_u32(id, 60);
1347 } else {
1348 if (ata_id_current_chs_valid(id))
1349 return ata_id_u32(id, 57);
1350 else
1351 return id[1] * id[3] * id[6];
1352 }
1353}
1354
1e999736
AC
1355static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1356{
1357 u64 sectors = 0;
1358
1359 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1360 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1361 sectors |= (tf->hob_lbal & 0xff) << 24;
1362 sectors |= (tf->lbah & 0xff) << 16;
1363 sectors |= (tf->lbam & 0xff) << 8;
1364 sectors |= (tf->lbal & 0xff);
1365
1366 return ++sectors;
1367}
1368
1369static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1370{
1371 u64 sectors = 0;
1372
1373 sectors |= (tf->device & 0x0f) << 24;
1374 sectors |= (tf->lbah & 0xff) << 16;
1375 sectors |= (tf->lbam & 0xff) << 8;
1376 sectors |= (tf->lbal & 0xff);
1377
1378 return ++sectors;
1379}
1380
1381/**
c728a914
TH
1382 * ata_read_native_max_address - Read native max address
1383 * @dev: target device
1384 * @max_sectors: out parameter for the result native max address
1e999736 1385 *
c728a914
TH
1386 * Perform an LBA48 or LBA28 native size query upon the device in
1387 * question.
1e999736 1388 *
c728a914
TH
1389 * RETURNS:
1390 * 0 on success, -EACCES if command is aborted by the drive.
1391 * -EIO on other errors.
1e999736 1392 */
c728a914 1393static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1394{
c728a914 1395 unsigned int err_mask;
1e999736 1396 struct ata_taskfile tf;
c728a914 1397 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1398
1399 ata_tf_init(dev, &tf);
1400
c728a914 1401 /* always clear all address registers */
1e999736 1402 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1403
c728a914
TH
1404 if (lba48) {
1405 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1406 tf.flags |= ATA_TFLAG_LBA48;
1407 } else
1408 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1409
1e999736 1410 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1411 tf.device |= ATA_LBA;
1412
2b789108 1413 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1414 if (err_mask) {
1415 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1416 "max address (err_mask=0x%x)\n", err_mask);
1417 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1418 return -EACCES;
1419 return -EIO;
1420 }
1e999736 1421
c728a914
TH
1422 if (lba48)
1423 *max_sectors = ata_tf_to_lba48(&tf);
1424 else
1425 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1426 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1427 (*max_sectors)--;
c728a914 1428 return 0;
1e999736
AC
1429}
1430
1431/**
c728a914
TH
1432 * ata_set_max_sectors - Set max sectors
1433 * @dev: target device
6b38d1d1 1434 * @new_sectors: new max sectors value to set for the device
1e999736 1435 *
c728a914
TH
1436 * Set max sectors of @dev to @new_sectors.
1437 *
1438 * RETURNS:
1439 * 0 on success, -EACCES if command is aborted or denied (due to
1440 * previous non-volatile SET_MAX) by the drive. -EIO on other
1441 * errors.
1e999736 1442 */
05027adc 1443static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1444{
c728a914 1445 unsigned int err_mask;
1e999736 1446 struct ata_taskfile tf;
c728a914 1447 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1448
1449 new_sectors--;
1450
1451 ata_tf_init(dev, &tf);
1452
1e999736 1453 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1454
1455 if (lba48) {
1456 tf.command = ATA_CMD_SET_MAX_EXT;
1457 tf.flags |= ATA_TFLAG_LBA48;
1458
1459 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1460 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1461 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1462 } else {
c728a914
TH
1463 tf.command = ATA_CMD_SET_MAX;
1464
1e582ba4
TH
1465 tf.device |= (new_sectors >> 24) & 0xf;
1466 }
1467
1e999736 1468 tf.protocol |= ATA_PROT_NODATA;
c728a914 1469 tf.device |= ATA_LBA;
1e999736
AC
1470
1471 tf.lbal = (new_sectors >> 0) & 0xff;
1472 tf.lbam = (new_sectors >> 8) & 0xff;
1473 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1474
2b789108 1475 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1476 if (err_mask) {
1477 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1478 "max address (err_mask=0x%x)\n", err_mask);
1479 if (err_mask == AC_ERR_DEV &&
1480 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1481 return -EACCES;
1482 return -EIO;
1483 }
1484
c728a914 1485 return 0;
1e999736
AC
1486}
1487
/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	/* size currently reported by IDENTIFY data */
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only for ATA disks with LBA and an
	 * enabled HPA, and not if HPA handling is already known broken
	 */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			/* remember so the probe-check above short-circuits
			 * next time around
			 */
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  (either no hidden area, or unlocking disabled) */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1583
0baab86b
EF
1584/**
1585 * ata_noop_dev_select - Select device 0/1 on ATA bus
1586 * @ap: ATA channel to manipulate
1587 * @device: ATA device (numbered from zero) to select
1588 *
1589 * This function performs no actual function.
1590 *
1591 * May be used as the dev_select() entry in ata_port_operations.
1592 *
1593 * LOCKING:
1594 * caller.
1595 */
2dcb407e 1596void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1597{
1598}
1599
0baab86b 1600
1da177e4
LT
1601/**
1602 * ata_std_dev_select - Select device 0/1 on ATA bus
1603 * @ap: ATA channel to manipulate
1604 * @device: ATA device (numbered from zero) to select
1605 *
1606 * Use the method defined in the ATA specification to
1607 * make either device 0, or device 1, active on the
0baab86b
EF
1608 * ATA channel. Works with both PIO and MMIO.
1609 *
1610 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1611 *
1612 * LOCKING:
1613 * caller.
1614 */
1615
2dcb407e 1616void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1617{
1618 u8 tmp;
1619
1620 if (device == 0)
1621 tmp = ATA_DEVICE_OBS;
1622 else
1623 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1624
0d5ff566 1625 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1626 ata_pause(ap); /* needed; also flushes, for mmio */
1627}
1628
1629/**
1630 * ata_dev_select - Select device 0/1 on ATA bus
1631 * @ap: ATA channel to manipulate
1632 * @device: ATA device (numbered from zero) to select
1633 * @wait: non-zero to wait for Status register BSY bit to clear
1634 * @can_sleep: non-zero if context allows sleeping
1635 *
1636 * Use the method defined in the ATA specification to
1637 * make either device 0, or device 1, active on the
1638 * ATA channel.
1639 *
1640 * This is a high-level version of ata_std_dev_select(),
1641 * which additionally provides the services of inserting
1642 * the proper pauses and status polling, where needed.
1643 *
1644 * LOCKING:
1645 * caller.
1646 */
1647
1648void ata_dev_select(struct ata_port *ap, unsigned int device,
1649 unsigned int wait, unsigned int can_sleep)
1650{
88574551 1651 if (ata_msg_probe(ap))
44877b4e
TH
1652 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1653 "device %u, wait %u\n", device, wait);
1da177e4
LT
1654
1655 if (wait)
1656 ata_wait_idle(ap);
1657
1658 ap->ops->dev_select(ap, device);
1659
1660 if (wait) {
9af5c9c9 1661 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1662 msleep(150);
1663 ata_wait_idle(ap);
1664 }
1665}
1666
1667/**
1668 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1669 * @id: IDENTIFY DEVICE page to dump
1da177e4 1670 *
0bd3300a
TH
1671 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1672 * page.
1da177e4
LT
1673 *
1674 * LOCKING:
1675 * caller.
1676 */
1677
/* Debug-dump a selected set of raw IDENTIFY DEVICE words; the numbers
 * in the output are word offsets into the IDENTIFY page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1705
cb95d562
TH
1706/**
1707 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1708 * @id: IDENTIFY data to compute xfer mask from
1709 *
1710 * Compute the xfermask for this device. This is not as trivial
1711 * as it seems if we must consider early devices correctly.
1712 *
1713 * FIXME: pre IDE drive timing (do we care ?).
1714 *
1715 * LOCKING:
1716 * None.
1717 *
1718 * RETURNS:
1719 * Computed xfermask
1720 */
7dc951ae 1721unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1722{
7dc951ae 1723 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1724
1725 /* Usual case. Word 53 indicates word 64 is valid */
1726 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1727 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1728 pio_mask <<= 3;
1729 pio_mask |= 0x7;
1730 } else {
1731 /* If word 64 isn't valid then Word 51 high byte holds
1732 * the PIO timing number for the maximum. Turn it into
1733 * a mask.
1734 */
7a0f1c8a 1735 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1736 if (mode < 5) /* Valid PIO range */
2dcb407e 1737 pio_mask = (2 << mode) - 1;
46767aeb
AC
1738 else
1739 pio_mask = 1;
cb95d562
TH
1740
1741 /* But wait.. there's more. Design your standards by
1742 * committee and you too can get a free iordy field to
1743 * process. However its the speeds not the modes that
1744 * are supported... Note drivers using the timing API
1745 * will get this right anyway
1746 */
1747 }
1748
1749 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1750
b352e57d
AC
1751 if (ata_id_is_cfa(id)) {
1752 /*
1753 * Process compact flash extended modes
1754 */
1755 int pio = id[163] & 0x7;
1756 int dma = (id[163] >> 3) & 7;
1757
1758 if (pio)
1759 pio_mask |= (1 << 5);
1760 if (pio > 1)
1761 pio_mask |= (1 << 6);
1762 if (dma)
1763 mwdma_mask |= (1 << 3);
1764 if (dma > 1)
1765 mwdma_mask |= (1 << 4);
1766 }
1767
fb21f0d0
TH
1768 udma_mask = 0;
1769 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1770 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1771
1772 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1773}
1774
86e45b6b 1775/**
442eacc3 1776 * ata_pio_queue_task - Queue port_task
86e45b6b 1777 * @ap: The ata_port to queue port_task for
e2a7f77a 1778 * @fn: workqueue function to be scheduled
65f27f38 1779 * @data: data for @fn to use
e2a7f77a 1780 * @delay: delay time for workqueue function
86e45b6b
TH
1781 *
1782 * Schedule @fn(@data) for execution after @delay jiffies using
1783 * port_task. There is one port_task per port and it's the
1784 * user(low level driver)'s responsibility to make sure that only
1785 * one task is active at any given time.
1786 *
1787 * libata core layer takes care of synchronization between
442eacc3 1788 * port_task and EH. ata_pio_queue_task() may be ignored for EH
86e45b6b
TH
1789 * synchronization.
1790 *
1791 * LOCKING:
1792 * Inherited from caller.
1793 */
442eacc3
JG
1794static void ata_pio_queue_task(struct ata_port *ap, void *data,
1795 unsigned long delay)
86e45b6b 1796{
65f27f38 1797 ap->port_task_data = data;
86e45b6b 1798
45a66c1c
ON
1799 /* may fail if ata_port_flush_task() in progress */
1800 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1801}
1802
1803/**
1804 * ata_port_flush_task - Flush port_task
1805 * @ap: The ata_port to flush port_task for
1806 *
1807 * After this function completes, port_task is guranteed not to
1808 * be running or scheduled.
1809 *
1810 * LOCKING:
1811 * Kernel thread context (may sleep)
1812 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending work item and waits out a running one, so
	 * on return port_task is neither scheduled nor executing
	 */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1822
7102d230 1823static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1824{
77853bf2 1825 struct completion *waiting = qc->private_data;
a2a7a662 1826
a2a7a662 1827 complete(waiting);
a2a7a662
TH
1828}
1829
1830/**
2432697b 1831 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1832 * @dev: Device to which the command is sent
1833 * @tf: Taskfile registers for the command and the result
d69cf37d 1834 * @cdb: CDB for packet command
a2a7a662 1835 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1836 * @sgl: sg list for the data buffer of the command
2432697b 1837 * @n_elem: Number of sg entries
2b789108 1838 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1839 *
1840 * Executes libata internal command with timeout. @tf contains
1841 * command on entry and result on return. Timeout and error
1842 * conditions are reported via return value. No recovery action
1843 * is taken after a command times out. It's caller's duty to
1844 * clean up after timeout.
1845 *
1846 * LOCKING:
1847 * None. Should be called with kernel context, might sleep.
551e8889
TH
1848 *
1849 * RETURNS:
1850 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1851 */
2432697b
TH
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the tag must be free; anything else is a driver bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the in-flight command state we are about to clobber;
	 * it is restored before returning
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	/* hand the result taskfile back to the caller */
	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the preempted command state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
2011
2432697b 2012/**
33480a0e 2013 * ata_exec_internal - execute libata internal command
2432697b
TH
2014 * @dev: Device to which the command is sent
2015 * @tf: Taskfile registers for the command and the result
2016 * @cdb: CDB for packet command
2017 * @dma_dir: Data tranfer direction of the command
2018 * @buf: Data buffer of the command
2019 * @buflen: Length of data buffer
2b789108 2020 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
2021 *
2022 * Wrapper around ata_exec_internal_sg() which takes simple
2023 * buffer instead of sg list.
2024 *
2025 * LOCKING:
2026 * None. Should be called with kernel context, might sleep.
2027 *
2028 * RETURNS:
2029 * Zero on success, AC_ERR_* mask on failure
2030 */
2031unsigned ata_exec_internal(struct ata_device *dev,
2032 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
2033 int dma_dir, void *buf, unsigned int buflen,
2034 unsigned long timeout)
2432697b 2035{
33480a0e
TH
2036 struct scatterlist *psg = NULL, sg;
2037 unsigned int n_elem = 0;
2432697b 2038
33480a0e
TH
2039 if (dma_dir != DMA_NONE) {
2040 WARN_ON(!buf);
2041 sg_init_one(&sg, buf, buflen);
2042 psg = &sg;
2043 n_elem++;
2044 }
2432697b 2045
2b789108
TH
2046 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
2047 timeout);
2432697b
TH
2048}
2049
977e6b9f
TH
2050/**
2051 * ata_do_simple_cmd - execute simple internal command
2052 * @dev: Device to which the command is sent
2053 * @cmd: Opcode to execute
2054 *
2055 * Execute a 'simple' command, that only consists of the opcode
2056 * 'cmd' itself, without filling any other registers
2057 *
2058 * LOCKING:
2059 * Kernel thread context (may sleep).
2060 *
2061 * RETURNS:
2062 * Zero on success, AC_ERR_* mask on failure
e58eb583 2063 */
77b08fb5 2064unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
2065{
2066 struct ata_taskfile tf;
e58eb583
TH
2067
2068 ata_tf_init(dev, &tf);
2069
2070 tf.command = cmd;
2071 tf.flags |= ATA_TFLAG_DEVICE;
2072 tf.protocol = ATA_PROT_NODATA;
2073
2b789108 2074 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
2075}
2076
1bc4ccff
AC
2077/**
2078 * ata_pio_need_iordy - check if iordy needed
2079 * @adev: ATA device
2080 *
2081 * Check if the current speed of the device requires IORDY. Used
2082 * by various controllers for chip configuration.
2083 */
a617c09f 2084
1bc4ccff
AC
2085unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2086{
432729f0
AC
2087 /* Controller doesn't support IORDY. Probably a pointless check
2088 as the caller should know this */
9af5c9c9 2089 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 2090 return 0;
432729f0
AC
2091 /* PIO3 and higher it is mandatory */
2092 if (adev->pio_mode > XFER_PIO_2)
2093 return 1;
2094 /* We turn it on when possible */
2095 if (ata_id_has_iordy(adev->id))
1bc4ccff 2096 return 1;
432729f0
AC
2097 return 0;
2098}
2e9edbf8 2099
432729f0
AC
2100/**
2101 * ata_pio_mask_no_iordy - Return the non IORDY mask
2102 * @adev: ATA device
2103 *
2104 * Compute the highest mode possible if we are not using iordy. Return
2105 * -1 if no iordy mode is available.
2106 */
a617c09f 2107
432729f0
AC
2108static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2109{
1bc4ccff 2110 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 2111 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 2112 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
2113 /* Is the speed faster than the drive allows non IORDY ? */
2114 if (pio) {
2115 /* This is cycle times not frequency - watch the logic! */
2116 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
2117 return 3 << ATA_SHIFT_PIO;
2118 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
2119 }
2120 }
432729f0 2121 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
2122}
2123
1da177e4 2124/**
49016aca 2125 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
2126 * @dev: target device
2127 * @p_class: pointer to class of the target device (may be changed)
bff04647 2128 * @flags: ATA_READID_* flags
fe635c7e 2129 * @id: buffer to read IDENTIFY data into
1da177e4 2130 *
49016aca
TH
2131 * Read ID data from the specified device. ATA_CMD_ID_ATA is
2132 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
2133 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
2134 * for pre-ATA4 drives.
1da177e4 2135 *
50a99018 2136 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 2137 * now we abort if we hit that case.
50a99018 2138 *
1da177e4 2139 * LOCKING:
49016aca
TH
2140 * Kernel thread context (may sleep)
2141 *
2142 * RETURNS:
2143 * 0 on success, -errno otherwise.
1da177e4 2144 */
a9beec95 2145int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 2146 unsigned int flags, u16 *id)
1da177e4 2147{
9af5c9c9 2148 struct ata_port *ap = dev->link->ap;
49016aca 2149 unsigned int class = *p_class;
a0123703 2150 struct ata_taskfile tf;
49016aca
TH
2151 unsigned int err_mask = 0;
2152 const char *reason;
54936f8b 2153 int may_fallback = 1, tried_spinup = 0;
49016aca 2154 int rc;
1da177e4 2155
0dd4b21f 2156 if (ata_msg_ctl(ap))
7f5e4e8d 2157 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1da177e4 2158
49016aca 2159 retry:
3373efd8 2160 ata_tf_init(dev, &tf);
a0123703 2161
49016aca
TH
2162 switch (class) {
2163 case ATA_DEV_ATA:
a0123703 2164 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
2165 break;
2166 case ATA_DEV_ATAPI:
a0123703 2167 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
2168 break;
2169 default:
2170 rc = -ENODEV;
2171 reason = "unsupported class";
2172 goto err_out;
1da177e4
LT
2173 }
2174
a0123703 2175 tf.protocol = ATA_PROT_PIO;
81afe893
TH
2176
2177 /* Some devices choke if TF registers contain garbage. Make
2178 * sure those are properly initialized.
2179 */
2180 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2181
2182 /* Device presence detection is unreliable on some
2183 * controllers. Always poll IDENTIFY if available.
2184 */
2185 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 2186
3373efd8 2187 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 2188 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 2189 if (err_mask) {
800b3996 2190 if (err_mask & AC_ERR_NODEV_HINT) {
1ffc151f
TH
2191 ata_dev_printk(dev, KERN_DEBUG,
2192 "NODEV after polling detection\n");
55a8e2c8
TH
2193 return -ENOENT;
2194 }
2195
1ffc151f
TH
2196 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2197 /* Device or controller might have reported
2198 * the wrong device class. Give a shot at the
2199 * other IDENTIFY if the current one is
2200 * aborted by the device.
2201 */
2202 if (may_fallback) {
2203 may_fallback = 0;
2204
2205 if (class == ATA_DEV_ATA)
2206 class = ATA_DEV_ATAPI;
2207 else
2208 class = ATA_DEV_ATA;
2209 goto retry;
2210 }
2211
2212 /* Control reaches here iff the device aborted
2213 * both flavors of IDENTIFYs which happens
2214 * sometimes with phantom devices.
2215 */
2216 ata_dev_printk(dev, KERN_DEBUG,
2217 "both IDENTIFYs aborted, assuming NODEV\n");
2218 return -ENOENT;
54936f8b
TH
2219 }
2220
49016aca
TH
2221 rc = -EIO;
2222 reason = "I/O error";
1da177e4
LT
2223 goto err_out;
2224 }
2225
54936f8b
TH
2226 /* Falling back doesn't make sense if ID data was read
2227 * successfully at least once.
2228 */
2229 may_fallback = 0;
2230
49016aca 2231 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 2232
49016aca 2233 /* sanity check */
a4f5749b 2234 rc = -EINVAL;
6070068b 2235 reason = "device reports invalid type";
a4f5749b
TH
2236
2237 if (class == ATA_DEV_ATA) {
2238 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2239 goto err_out;
2240 } else {
2241 if (ata_id_is_ata(id))
2242 goto err_out;
49016aca
TH
2243 }
2244
169439c2
ML
2245 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2246 tried_spinup = 1;
2247 /*
2248 * Drive powered-up in standby mode, and requires a specific
2249 * SET_FEATURES spin-up subcommand before it will accept
2250 * anything other than the original IDENTIFY command.
2251 */
218f3d30 2252 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2253 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2254 rc = -EIO;
2255 reason = "SPINUP failed";
2256 goto err_out;
2257 }
2258 /*
2259 * If the drive initially returned incomplete IDENTIFY info,
2260 * we now must reissue the IDENTIFY command.
2261 */
2262 if (id[2] == 0x37c8)
2263 goto retry;
2264 }
2265
bff04647 2266 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2267 /*
2268 * The exact sequence expected by certain pre-ATA4 drives is:
2269 * SRST RESET
50a99018
AC
2270 * IDENTIFY (optional in early ATA)
2271 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2272 * anything else..
2273 * Some drives were very specific about that exact sequence.
50a99018
AC
2274 *
2275 * Note that ATA4 says lba is mandatory so the second check
2276 * shoud never trigger.
49016aca
TH
2277 */
2278 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2279 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2280 if (err_mask) {
2281 rc = -EIO;
2282 reason = "INIT_DEV_PARAMS failed";
2283 goto err_out;
2284 }
2285
2286 /* current CHS translation info (id[53-58]) might be
2287 * changed. reread the identify device info.
2288 */
bff04647 2289 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2290 goto retry;
2291 }
2292 }
2293
2294 *p_class = class;
fe635c7e 2295
49016aca
TH
2296 return 0;
2297
2298 err_out:
88574551 2299 if (ata_msg_warn(ap))
0dd4b21f 2300 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2301 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2302 return rc;
2303}
2304
3373efd8 2305static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2306{
9af5c9c9
TH
2307 struct ata_port *ap = dev->link->ap;
2308 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2309}
2310
a6e6ce8e
TH
2311static void ata_dev_config_ncq(struct ata_device *dev,
2312 char *desc, size_t desc_sz)
2313{
9af5c9c9 2314 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2315 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2316
2317 if (!ata_id_has_ncq(dev->id)) {
2318 desc[0] = '\0';
2319 return;
2320 }
75683fe7 2321 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2322 snprintf(desc, desc_sz, "NCQ (not used)");
2323 return;
2324 }
a6e6ce8e 2325 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2326 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2327 dev->flags |= ATA_DFLAG_NCQ;
2328 }
2329
2330 if (hdepth >= ddepth)
2331 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2332 else
2333 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2334}
2335
49016aca 2336/**
ffeae418 2337 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2338 * @dev: Target device to configure
2339 *
2340 * Configure @dev according to @dev->id. Generic and low-level
2341 * driver specific fixups are also applied.
49016aca
TH
2342 *
2343 * LOCKING:
ffeae418
TH
2344 * Kernel thread context (may sleep)
2345 *
2346 * RETURNS:
2347 * 0 on success, -errno otherwise
49016aca 2348 */
efdaedc4 2349int ata_dev_configure(struct ata_device *dev)
49016aca 2350{
9af5c9c9
TH
2351 struct ata_port *ap = dev->link->ap;
2352 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2353 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2354 const u16 *id = dev->id;
7dc951ae 2355 unsigned long xfer_mask;
b352e57d 2356 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2357 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2358 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2359 int rc;
49016aca 2360
0dd4b21f 2361 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e 2362 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
7f5e4e8d 2363 __func__);
ffeae418 2364 return 0;
49016aca
TH
2365 }
2366
0dd4b21f 2367 if (ata_msg_probe(ap))
7f5e4e8d 2368 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1da177e4 2369
75683fe7
TH
2370 /* set horkage */
2371 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2372 ata_force_horkage(dev);
75683fe7 2373
6746544c
TH
2374 /* let ACPI work its magic */
2375 rc = ata_acpi_on_devcfg(dev);
2376 if (rc)
2377 return rc;
08573a86 2378
05027adc
TH
2379 /* massage HPA, do it early as it might change IDENTIFY data */
2380 rc = ata_hpa_resize(dev);
2381 if (rc)
2382 return rc;
2383
c39f5ebe 2384 /* print device capabilities */
0dd4b21f 2385 if (ata_msg_probe(ap))
88574551
TH
2386 ata_dev_printk(dev, KERN_DEBUG,
2387 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2388 "85:%04x 86:%04x 87:%04x 88:%04x\n",
7f5e4e8d 2389 __func__,
f15a1daf
TH
2390 id[49], id[82], id[83], id[84],
2391 id[85], id[86], id[87], id[88]);
c39f5ebe 2392
208a9933 2393 /* initialize to-be-configured parameters */
ea1dd4e1 2394 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2395 dev->max_sectors = 0;
2396 dev->cdb_len = 0;
2397 dev->n_sectors = 0;
2398 dev->cylinders = 0;
2399 dev->heads = 0;
2400 dev->sectors = 0;
2401
1da177e4
LT
2402 /*
2403 * common ATA, ATAPI feature tests
2404 */
2405
ff8854b2 2406 /* find max transfer mode; for printk only */
1148c3a7 2407 xfer_mask = ata_id_xfermask(id);
1da177e4 2408
0dd4b21f
BP
2409 if (ata_msg_probe(ap))
2410 ata_dump_id(id);
1da177e4 2411
ef143d57
AL
2412 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2413 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2414 sizeof(fwrevbuf));
2415
2416 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2417 sizeof(modelbuf));
2418
1da177e4
LT
2419 /* ATA-specific feature tests */
2420 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
2421 if (ata_id_is_cfa(id)) {
2422 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
2423 ata_dev_printk(dev, KERN_WARNING,
2424 "supports DRM functions and may "
2425 "not be fully accessable.\n");
b352e57d 2426 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2427 } else {
2dcb407e 2428 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2429 /* Warn the user if the device has TPM extensions */
2430 if (ata_id_has_tpm(id))
2431 ata_dev_printk(dev, KERN_WARNING,
2432 "supports DRM functions and may "
2433 "not be fully accessable.\n");
2434 }
b352e57d 2435
1148c3a7 2436 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2437
3f64f565
EM
2438 if (dev->id[59] & 0x100)
2439 dev->multi_count = dev->id[59] & 0xff;
2440
1148c3a7 2441 if (ata_id_has_lba(id)) {
4c2d721a 2442 const char *lba_desc;
a6e6ce8e 2443 char ncq_desc[20];
8bf62ece 2444
4c2d721a
TH
2445 lba_desc = "LBA";
2446 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2447 if (ata_id_has_lba48(id)) {
8bf62ece 2448 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2449 lba_desc = "LBA48";
6fc49adb
TH
2450
2451 if (dev->n_sectors >= (1UL << 28) &&
2452 ata_id_has_flush_ext(id))
2453 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2454 }
8bf62ece 2455
a6e6ce8e
TH
2456 /* config NCQ */
2457 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2458
8bf62ece 2459 /* print device info to dmesg */
3f64f565
EM
2460 if (ata_msg_drv(ap) && print_info) {
2461 ata_dev_printk(dev, KERN_INFO,
2462 "%s: %s, %s, max %s\n",
2463 revbuf, modelbuf, fwrevbuf,
2464 ata_mode_string(xfer_mask));
2465 ata_dev_printk(dev, KERN_INFO,
2466 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2467 (unsigned long long)dev->n_sectors,
3f64f565
EM
2468 dev->multi_count, lba_desc, ncq_desc);
2469 }
ffeae418 2470 } else {
8bf62ece
AL
2471 /* CHS */
2472
2473 /* Default translation */
1148c3a7
TH
2474 dev->cylinders = id[1];
2475 dev->heads = id[3];
2476 dev->sectors = id[6];
8bf62ece 2477
1148c3a7 2478 if (ata_id_current_chs_valid(id)) {
8bf62ece 2479 /* Current CHS translation is valid. */
1148c3a7
TH
2480 dev->cylinders = id[54];
2481 dev->heads = id[55];
2482 dev->sectors = id[56];
8bf62ece
AL
2483 }
2484
2485 /* print device info to dmesg */
3f64f565 2486 if (ata_msg_drv(ap) && print_info) {
88574551 2487 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2488 "%s: %s, %s, max %s\n",
2489 revbuf, modelbuf, fwrevbuf,
2490 ata_mode_string(xfer_mask));
a84471fe 2491 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2492 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2493 (unsigned long long)dev->n_sectors,
2494 dev->multi_count, dev->cylinders,
2495 dev->heads, dev->sectors);
2496 }
07f6f7d0
AL
2497 }
2498
6e7846e9 2499 dev->cdb_len = 16;
1da177e4
LT
2500 }
2501
2502 /* ATAPI-specific feature tests */
2c13b7ce 2503 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2504 const char *cdb_intr_string = "";
2505 const char *atapi_an_string = "";
91163006 2506 const char *dma_dir_string = "";
7d77b247 2507 u32 sntf;
08a556db 2508
1148c3a7 2509 rc = atapi_cdb_len(id);
1da177e4 2510 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2511 if (ata_msg_warn(ap))
88574551
TH
2512 ata_dev_printk(dev, KERN_WARNING,
2513 "unsupported CDB len\n");
ffeae418 2514 rc = -EINVAL;
1da177e4
LT
2515 goto err_out_nosup;
2516 }
6e7846e9 2517 dev->cdb_len = (unsigned int) rc;
1da177e4 2518
7d77b247
TH
2519 /* Enable ATAPI AN if both the host and device have
2520 * the support. If PMP is attached, SNTF is required
2521 * to enable ATAPI AN to discern between PHY status
2522 * changed notifications and ATAPI ANs.
9f45cbd3 2523 */
7d77b247
TH
2524 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2525 (!ap->nr_pmp_links ||
2526 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2527 unsigned int err_mask;
2528
9f45cbd3 2529 /* issue SET feature command to turn this on */
218f3d30
JG
2530 err_mask = ata_dev_set_feature(dev,
2531 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2532 if (err_mask)
9f45cbd3 2533 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2534 "failed to enable ATAPI AN "
2535 "(err_mask=0x%x)\n", err_mask);
2536 else {
9f45cbd3 2537 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2538 atapi_an_string = ", ATAPI AN";
2539 }
9f45cbd3
KCA
2540 }
2541
08a556db 2542 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2543 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2544 cdb_intr_string = ", CDB intr";
2545 }
312f7da2 2546
91163006
TH
2547 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2548 dev->flags |= ATA_DFLAG_DMADIR;
2549 dma_dir_string = ", DMADIR";
2550 }
2551
1da177e4 2552 /* print device info to dmesg */
5afc8142 2553 if (ata_msg_drv(ap) && print_info)
ef143d57 2554 ata_dev_printk(dev, KERN_INFO,
91163006 2555 "ATAPI: %s, %s, max %s%s%s%s\n",
ef143d57 2556 modelbuf, fwrevbuf,
12436c30 2557 ata_mode_string(xfer_mask),
91163006
TH
2558 cdb_intr_string, atapi_an_string,
2559 dma_dir_string);
1da177e4
LT
2560 }
2561
914ed354
TH
2562 /* determine max_sectors */
2563 dev->max_sectors = ATA_MAX_SECTORS;
2564 if (dev->flags & ATA_DFLAG_LBA48)
2565 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2566
ca77329f
KCA
2567 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2568 if (ata_id_has_hipm(dev->id))
2569 dev->flags |= ATA_DFLAG_HIPM;
2570 if (ata_id_has_dipm(dev->id))
2571 dev->flags |= ATA_DFLAG_DIPM;
2572 }
2573
c5038fc0
AC
2574 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2575 200 sectors */
3373efd8 2576 if (ata_dev_knobble(dev)) {
5afc8142 2577 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2578 ata_dev_printk(dev, KERN_INFO,
2579 "applying bridge limits\n");
5a529139 2580 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2581 dev->max_sectors = ATA_MAX_SECTORS;
2582 }
2583
f8d8e579 2584 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2585 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2586 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2587 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2588 }
f8d8e579 2589
75683fe7 2590 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2591 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2592 dev->max_sectors);
18d6e9d5 2593
ca77329f
KCA
2594 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2595 dev->horkage |= ATA_HORKAGE_IPM;
2596
2597 /* reset link pm_policy for this port to no pm */
2598 ap->pm_policy = MAX_PERFORMANCE;
2599 }
2600
4b2f3ede 2601 if (ap->ops->dev_config)
cd0d3bbc 2602 ap->ops->dev_config(dev);
4b2f3ede 2603
c5038fc0
AC
2604 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2605 /* Let the user know. We don't want to disallow opens for
2606 rescue purposes, or in case the vendor is just a blithering
2607 idiot. Do this after the dev_config call as some controllers
2608 with buggy firmware may want to avoid reporting false device
2609 bugs */
2610
2611 if (print_info) {
2612 ata_dev_printk(dev, KERN_WARNING,
2613"Drive reports diagnostics failure. This may indicate a drive\n");
2614 ata_dev_printk(dev, KERN_WARNING,
2615"fault or invalid emulation. Contact drive vendor for information.\n");
2616 }
2617 }
2618
0dd4b21f
BP
2619 if (ata_msg_probe(ap))
2620 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
7f5e4e8d 2621 __func__, ata_chk_status(ap));
ffeae418 2622 return 0;
1da177e4
LT
2623
2624err_out_nosup:
0dd4b21f 2625 if (ata_msg_probe(ap))
88574551 2626 ata_dev_printk(dev, KERN_DEBUG,
7f5e4e8d 2627 "%s: EXIT, err\n", __func__);
ffeae418 2628 return rc;
1da177e4
LT
2629}
2630
be0d18df 2631/**
2e41e8e6 2632 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2633 * @ap: port
2634 *
2e41e8e6 2635 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2636 * detection.
2637 */
2638
2639int ata_cable_40wire(struct ata_port *ap)
2640{
2641 return ATA_CBL_PATA40;
2642}
2643
2644/**
2e41e8e6 2645 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2646 * @ap: port
2647 *
2e41e8e6 2648 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2649 * detection.
2650 */
2651
2652int ata_cable_80wire(struct ata_port *ap)
2653{
2654 return ATA_CBL_PATA80;
2655}
2656
2657/**
2658 * ata_cable_unknown - return unknown PATA cable.
2659 * @ap: port
2660 *
2661 * Helper method for drivers which have no PATA cable detection.
2662 */
2663
2664int ata_cable_unknown(struct ata_port *ap)
2665{
2666 return ATA_CBL_PATA_UNK;
2667}
2668
c88f90c3
TH
2669/**
2670 * ata_cable_ignore - return ignored PATA cable.
2671 * @ap: port
2672 *
2673 * Helper method for drivers which don't use cable type to limit
2674 * transfer mode.
2675 */
2676int ata_cable_ignore(struct ata_port *ap)
2677{
2678 return ATA_CBL_PATA_IGN;
2679}
2680
be0d18df
AC
2681/**
2682 * ata_cable_sata - return SATA cable type
2683 * @ap: port
2684 *
2685 * Helper method for drivers which have SATA cables
2686 */
2687
2688int ata_cable_sata(struct ata_port *ap)
2689{
2690 return ATA_CBL_SATA;
2691}
2692
1da177e4
LT
2693/**
2694 * ata_bus_probe - Reset and probe ATA bus
2695 * @ap: Bus to probe
2696 *
0cba632b
JG
2697 * Master ATA bus probing function. Initiates a hardware-dependent
2698 * bus reset, then attempts to identify any devices found on
2699 * the bus.
2700 *
1da177e4 2701 * LOCKING:
0cba632b 2702 * PCI/etc. bus probe sem.
1da177e4
LT
2703 *
2704 * RETURNS:
96072e69 2705 * Zero on success, negative errno otherwise.
1da177e4
LT
2706 */
2707
80289167 2708int ata_bus_probe(struct ata_port *ap)
1da177e4 2709{
28ca5c57 2710 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2711 int tries[ATA_MAX_DEVICES];
f58229f8 2712 int rc;
e82cbdb9 2713 struct ata_device *dev;
1da177e4 2714
28ca5c57 2715 ata_port_probe(ap);
c19ba8af 2716
f58229f8
TH
2717 ata_link_for_each_dev(dev, &ap->link)
2718 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2719
2720 retry:
cdeab114
TH
2721 ata_link_for_each_dev(dev, &ap->link) {
2722 /* If we issue an SRST then an ATA drive (not ATAPI)
2723 * may change configuration and be in PIO0 timing. If
2724 * we do a hard reset (or are coming from power on)
2725 * this is true for ATA or ATAPI. Until we've set a
2726 * suitable controller mode we should not touch the
2727 * bus as we may be talking too fast.
2728 */
2729 dev->pio_mode = XFER_PIO_0;
2730
2731 /* If the controller has a pio mode setup function
2732 * then use it to set the chipset to rights. Don't
2733 * touch the DMA setup as that will be dealt with when
2734 * configuring devices.
2735 */
2736 if (ap->ops->set_piomode)
2737 ap->ops->set_piomode(ap, dev);
2738 }
2739
2044470c 2740 /* reset and determine device classes */
52783c5d 2741 ap->ops->phy_reset(ap);
2061a47a 2742
f58229f8 2743 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2744 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2745 dev->class != ATA_DEV_UNKNOWN)
2746 classes[dev->devno] = dev->class;
2747 else
2748 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2749
52783c5d 2750 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2751 }
1da177e4 2752
52783c5d 2753 ata_port_probe(ap);
2044470c 2754
f31f0cc2
JG
2755 /* read IDENTIFY page and configure devices. We have to do the identify
2756 specific sequence bass-ackwards so that PDIAG- is released by
2757 the slave device */
2758
a4ba7fe2 2759 ata_link_for_each_dev_reverse(dev, &ap->link) {
f58229f8
TH
2760 if (tries[dev->devno])
2761 dev->class = classes[dev->devno];
ffeae418 2762
14d2bac1 2763 if (!ata_dev_enabled(dev))
ffeae418 2764 continue;
ffeae418 2765
bff04647
TH
2766 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2767 dev->id);
14d2bac1
TH
2768 if (rc)
2769 goto fail;
f31f0cc2
JG
2770 }
2771
be0d18df
AC
2772 /* Now ask for the cable type as PDIAG- should have been released */
2773 if (ap->ops->cable_detect)
2774 ap->cbl = ap->ops->cable_detect(ap);
2775
614fe29b
AC
2776 /* We may have SATA bridge glue hiding here irrespective of the
2777 reported cable types and sensed types */
2778 ata_link_for_each_dev(dev, &ap->link) {
2779 if (!ata_dev_enabled(dev))
2780 continue;
2781 /* SATA drives indicate we have a bridge. We don't know which
2782 end of the link the bridge is which is a problem */
2783 if (ata_id_is_sata(dev->id))
2784 ap->cbl = ATA_CBL_SATA;
2785 }
2786
f31f0cc2
JG
2787 /* After the identify sequence we can now set up the devices. We do
2788 this in the normal order so that the user doesn't get confused */
2789
f58229f8 2790 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2791 if (!ata_dev_enabled(dev))
2792 continue;
14d2bac1 2793
9af5c9c9 2794 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2795 rc = ata_dev_configure(dev);
9af5c9c9 2796 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2797 if (rc)
2798 goto fail;
1da177e4
LT
2799 }
2800
e82cbdb9 2801 /* configure transfer mode */
0260731f 2802 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2803 if (rc)
51713d35 2804 goto fail;
1da177e4 2805
f58229f8
TH
2806 ata_link_for_each_dev(dev, &ap->link)
2807 if (ata_dev_enabled(dev))
e82cbdb9 2808 return 0;
1da177e4 2809
e82cbdb9
TH
2810 /* no device present, disable port */
2811 ata_port_disable(ap);
96072e69 2812 return -ENODEV;
14d2bac1
TH
2813
2814 fail:
4ae72a1e
TH
2815 tries[dev->devno]--;
2816
14d2bac1
TH
2817 switch (rc) {
2818 case -EINVAL:
4ae72a1e 2819 /* eeek, something went very wrong, give up */
14d2bac1
TH
2820 tries[dev->devno] = 0;
2821 break;
4ae72a1e
TH
2822
2823 case -ENODEV:
2824 /* give it just one more chance */
2825 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2826 case -EIO:
4ae72a1e
TH
2827 if (tries[dev->devno] == 1) {
2828 /* This is the last chance, better to slow
2829 * down than lose it.
2830 */
936fd732 2831 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2832 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2833 }
14d2bac1
TH
2834 }
2835
4ae72a1e 2836 if (!tries[dev->devno])
3373efd8 2837 ata_dev_disable(dev);
ec573755 2838
14d2bac1 2839 goto retry;
1da177e4
LT
2840}
2841
2842/**
0cba632b
JG
2843 * ata_port_probe - Mark port as enabled
2844 * @ap: Port for which we indicate enablement
1da177e4 2845 *
0cba632b
JG
2846 * Modify @ap data structure such that the system
2847 * thinks that the entire port is enabled.
2848 *
cca3974e 2849 * LOCKING: host lock, or some other form of
0cba632b 2850 * serialization.
1da177e4
LT
2851 */
2852
2853void ata_port_probe(struct ata_port *ap)
2854{
198e0fed 2855 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2856}
2857
3be680b7
TH
2858/**
2859 * sata_print_link_status - Print SATA link status
936fd732 2860 * @link: SATA link to printk link status about
3be680b7
TH
2861 *
2862 * This function prints link speed and status of a SATA link.
2863 *
2864 * LOCKING:
2865 * None.
2866 */
936fd732 2867void sata_print_link_status(struct ata_link *link)
3be680b7 2868{
6d5f9732 2869 u32 sstatus, scontrol, tmp;
3be680b7 2870
936fd732 2871 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2872 return;
936fd732 2873 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2874
936fd732 2875 if (ata_link_online(link)) {
3be680b7 2876 tmp = (sstatus >> 4) & 0xf;
936fd732 2877 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2878 "SATA link up %s (SStatus %X SControl %X)\n",
2879 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2880 } else {
936fd732 2881 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2882 "SATA link down (SStatus %X SControl %X)\n",
2883 sstatus, scontrol);
3be680b7
TH
2884 }
2885}
2886
ebdfca6e
AC
2887/**
2888 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2889 * @adev: device
2890 *
2891 * Obtain the other device on the same cable, or if none is
2892 * present NULL is returned
2893 */
2e9edbf8 2894
3373efd8 2895struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2896{
9af5c9c9
TH
2897 struct ata_link *link = adev->link;
2898 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2899 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2900 return NULL;
2901 return pair;
2902}
2903
1da177e4 2904/**
780a87f7
JG
2905 * ata_port_disable - Disable port.
2906 * @ap: Port to be disabled.
1da177e4 2907 *
780a87f7
JG
2908 * Modify @ap data structure such that the system
2909 * thinks that the entire port is disabled, and should
2910 * never attempt to probe or communicate with devices
2911 * on this port.
2912 *
cca3974e 2913 * LOCKING: host lock, or some other form of
780a87f7 2914 * serialization.
1da177e4
LT
2915 */
2916
2917void ata_port_disable(struct ata_port *ap)
2918{
9af5c9c9
TH
2919 ap->link.device[0].class = ATA_DEV_NONE;
2920 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2921 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2922}
2923
1c3fae4d 2924/**
3c567b7d 2925 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2926 * @link: Link to adjust SATA spd limit for
1c3fae4d 2927 *
936fd732 2928 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2929 * function only adjusts the limit. The change must be applied
3c567b7d 2930 * using sata_set_spd().
1c3fae4d
TH
2931 *
2932 * LOCKING:
2933 * Inherited from caller.
2934 *
2935 * RETURNS:
2936 * 0 on success, negative errno on failure
2937 */
936fd732 2938int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2939{
81952c54
TH
2940 u32 sstatus, spd, mask;
2941 int rc, highbit;
1c3fae4d 2942
936fd732 2943 if (!sata_scr_valid(link))
008a7896
TH
2944 return -EOPNOTSUPP;
2945
2946 /* If SCR can be read, use it to determine the current SPD.
936fd732 2947 * If not, use cached value in link->sata_spd.
008a7896 2948 */
936fd732 2949 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2950 if (rc == 0)
2951 spd = (sstatus >> 4) & 0xf;
2952 else
936fd732 2953 spd = link->sata_spd;
1c3fae4d 2954
936fd732 2955 mask = link->sata_spd_limit;
1c3fae4d
TH
2956 if (mask <= 1)
2957 return -EINVAL;
008a7896
TH
2958
2959 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2960 highbit = fls(mask) - 1;
2961 mask &= ~(1 << highbit);
2962
008a7896
TH
2963 /* Mask off all speeds higher than or equal to the current
2964 * one. Force 1.5Gbps if current SPD is not available.
2965 */
2966 if (spd > 1)
2967 mask &= (1 << (spd - 1)) - 1;
2968 else
2969 mask &= 1;
2970
2971 /* were we already at the bottom? */
1c3fae4d
TH
2972 if (!mask)
2973 return -EINVAL;
2974
936fd732 2975 link->sata_spd_limit = mask;
1c3fae4d 2976
936fd732 2977 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2978 sata_spd_string(fls(mask)));
1c3fae4d
TH
2979
2980 return 0;
2981}
2982
936fd732 2983static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2984{
5270222f
TH
2985 struct ata_link *host_link = &link->ap->link;
2986 u32 limit, target, spd;
1c3fae4d 2987
5270222f
TH
2988 limit = link->sata_spd_limit;
2989
2990 /* Don't configure downstream link faster than upstream link.
2991 * It doesn't speed up anything and some PMPs choke on such
2992 * configuration.
2993 */
2994 if (!ata_is_host_link(link) && host_link->sata_spd)
2995 limit &= (1 << host_link->sata_spd) - 1;
2996
2997 if (limit == UINT_MAX)
2998 target = 0;
1c3fae4d 2999 else
5270222f 3000 target = fls(limit);
1c3fae4d
TH
3001
3002 spd = (*scontrol >> 4) & 0xf;
5270222f 3003 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 3004
5270222f 3005 return spd != target;
1c3fae4d
TH
3006}
3007
3008/**
3c567b7d 3009 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 3010 * @link: Link in question
1c3fae4d
TH
3011 *
3012 * Test whether the spd limit in SControl matches
936fd732 3013 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
3014 * whether hardreset is necessary to apply SATA spd
3015 * configuration.
3016 *
3017 * LOCKING:
3018 * Inherited from caller.
3019 *
3020 * RETURNS:
3021 * 1 if SATA spd configuration is needed, 0 otherwise.
3022 */
936fd732 3023int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
3024{
3025 u32 scontrol;
3026
936fd732 3027 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 3028 return 1;
1c3fae4d 3029
936fd732 3030 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
3031}
3032
3033/**
3c567b7d 3034 * sata_set_spd - set SATA spd according to spd limit
936fd732 3035 * @link: Link to set SATA spd for
1c3fae4d 3036 *
936fd732 3037 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
3038 *
3039 * LOCKING:
3040 * Inherited from caller.
3041 *
3042 * RETURNS:
3043 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 3044 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 3045 */
936fd732 3046int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
3047{
3048 u32 scontrol;
81952c54 3049 int rc;
1c3fae4d 3050
936fd732 3051 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3052 return rc;
1c3fae4d 3053
936fd732 3054 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
3055 return 0;
3056
936fd732 3057 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
3058 return rc;
3059
1c3fae4d
TH
3060 return 1;
3061}
3062
452503f9
AC
3063/*
3064 * This mode timing computation functionality is ported over from
3065 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3066 */
3067/*
b352e57d 3068 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 3069 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
3070 * for UDMA6, which is currently supported only by Maxtor drives.
3071 *
3072 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
3073 */
3074
3075static const struct ata_timing ata_timing[] = {
70cd071e
TH
3076/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
3077 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
3078 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
3079 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
3080 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
3081 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
3082 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
3083 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
452503f9 3084
70cd071e
TH
3085 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
3086 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
3087 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
452503f9 3088
70cd071e
TH
3089 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
3090 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
3091 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
b352e57d 3092 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
70cd071e 3093 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
452503f9
AC
3094
3095/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
70cd071e
TH
3096 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
3097 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
3098 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
3099 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
3100 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
3101 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
3102 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
3103
3104 { 0xFF }
3105};
3106
/* ENOUGH(): round v up to a whole number of units (ceiling division). */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* EZ(): like ENOUGH() but a zero (unused timing field) stays zero. */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3110static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3111{
3112 q->setup = EZ(t->setup * 1000, T);
3113 q->act8b = EZ(t->act8b * 1000, T);
3114 q->rec8b = EZ(t->rec8b * 1000, T);
3115 q->cyc8b = EZ(t->cyc8b * 1000, T);
3116 q->active = EZ(t->active * 1000, T);
3117 q->recover = EZ(t->recover * 1000, T);
3118 q->cycle = EZ(t->cycle * 1000, T);
3119 q->udma = EZ(t->udma * 1000, UT);
3120}
3121
3122void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3123 struct ata_timing *m, unsigned int what)
3124{
3125 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3126 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3127 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3128 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3129 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3130 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3131 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3132 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3133}
3134
6357357c 3135const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3136{
70cd071e
TH
3137 const struct ata_timing *t = ata_timing;
3138
3139 while (xfer_mode > t->mode)
3140 t++;
452503f9 3141
70cd071e
TH
3142 if (xfer_mode == t->mode)
3143 return t;
3144 return NULL;
452503f9
AC
3145}
3146
3147int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3148 struct ata_timing *t, int T, int UT)
3149{
3150 const struct ata_timing *s;
3151 struct ata_timing p;
3152
3153 /*
2e9edbf8 3154 * Find the mode.
75b1f2f8 3155 */
452503f9
AC
3156
3157 if (!(s = ata_timing_find_mode(speed)))
3158 return -EINVAL;
3159
75b1f2f8
AL
3160 memcpy(t, s, sizeof(*s));
3161
452503f9
AC
3162 /*
3163 * If the drive is an EIDE drive, it can tell us it needs extended
3164 * PIO/MW_DMA cycle timing.
3165 */
3166
3167 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3168 memset(&p, 0, sizeof(p));
2dcb407e 3169 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
452503f9
AC
3170 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3171 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2dcb407e 3172 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
452503f9
AC
3173 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3174 }
3175 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3176 }
3177
3178 /*
3179 * Convert the timing to bus clock counts.
3180 */
3181
75b1f2f8 3182 ata_timing_quantize(t, t, T, UT);
452503f9
AC
3183
3184 /*
c893a3ae
RD
3185 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3186 * S.M.A.R.T * and some other commands. We have to ensure that the
3187 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
3188 */
3189
fd3367af 3190 if (speed > XFER_PIO_6) {
452503f9
AC
3191 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3192 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3193 }
3194
3195 /*
c893a3ae 3196 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
3197 */
3198
3199 if (t->act8b + t->rec8b < t->cyc8b) {
3200 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3201 t->rec8b = t->cyc8b - t->act8b;
3202 }
3203
3204 if (t->active + t->recover < t->cycle) {
3205 t->active += (t->cycle - (t->active + t->recover)) / 2;
3206 t->recover = t->cycle - t->active;
3207 }
a617c09f 3208
4f701d1e
AC
3209 /* In a few cases quantisation may produce enough errors to
3210 leave t->cycle too low for the sum of active and recovery
3211 if so we must correct this */
3212 if (t->active + t->recover > t->cycle)
3213 t->cycle = t->active + t->recover;
452503f9
AC
3214
3215 return 0;
3216}
3217
a0f79b92
TH
3218/**
3219 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3220 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3221 * @cycle: cycle duration in ns
3222 *
3223 * Return matching xfer mode for @cycle. The returned mode is of
3224 * the transfer type specified by @xfer_shift. If @cycle is too
3225 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3226 * than the fastest known mode, the fasted mode is returned.
3227 *
3228 * LOCKING:
3229 * None.
3230 *
3231 * RETURNS:
3232 * Matching xfer_mode, 0xff if no match found.
3233 */
3234u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3235{
3236 u8 base_mode = 0xff, last_mode = 0xff;
3237 const struct ata_xfer_ent *ent;
3238 const struct ata_timing *t;
3239
3240 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3241 if (ent->shift == xfer_shift)
3242 base_mode = ent->base;
3243
3244 for (t = ata_timing_find_mode(base_mode);
3245 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3246 unsigned short this_cycle;
3247
3248 switch (xfer_shift) {
3249 case ATA_SHIFT_PIO:
3250 case ATA_SHIFT_MWDMA:
3251 this_cycle = t->cycle;
3252 break;
3253 case ATA_SHIFT_UDMA:
3254 this_cycle = t->udma;
3255 break;
3256 default:
3257 return 0xff;
3258 }
3259
3260 if (cycle > this_cycle)
3261 break;
3262
3263 last_mode = t->mode;
3264 }
3265
3266 return last_mode;
3267}
3268
cf176e1a
TH
3269/**
3270 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 3271 * @dev: Device to adjust xfer masks
458337db 3272 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
3273 *
3274 * Adjust xfer masks of @dev downward. Note that this function
3275 * does not apply the change. Invoking ata_set_mode() afterwards
3276 * will apply the limit.
3277 *
3278 * LOCKING:
3279 * Inherited from caller.
3280 *
3281 * RETURNS:
3282 * 0 on success, negative errno on failure
3283 */
458337db 3284int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 3285{
458337db 3286 char buf[32];
7dc951ae
TH
3287 unsigned long orig_mask, xfer_mask;
3288 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 3289 int quiet, highbit;
cf176e1a 3290
458337db
TH
3291 quiet = !!(sel & ATA_DNXFER_QUIET);
3292 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 3293
458337db
TH
3294 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3295 dev->mwdma_mask,
3296 dev->udma_mask);
3297 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 3298
458337db
TH
3299 switch (sel) {
3300 case ATA_DNXFER_PIO:
3301 highbit = fls(pio_mask) - 1;
3302 pio_mask &= ~(1 << highbit);
3303 break;
3304
3305 case ATA_DNXFER_DMA:
3306 if (udma_mask) {
3307 highbit = fls(udma_mask) - 1;
3308 udma_mask &= ~(1 << highbit);
3309 if (!udma_mask)
3310 return -ENOENT;
3311 } else if (mwdma_mask) {
3312 highbit = fls(mwdma_mask) - 1;
3313 mwdma_mask &= ~(1 << highbit);
3314 if (!mwdma_mask)
3315 return -ENOENT;
3316 }
3317 break;
3318
3319 case ATA_DNXFER_40C:
3320 udma_mask &= ATA_UDMA_MASK_40C;
3321 break;
3322
3323 case ATA_DNXFER_FORCE_PIO0:
3324 pio_mask &= 1;
3325 case ATA_DNXFER_FORCE_PIO:
3326 mwdma_mask = 0;
3327 udma_mask = 0;
3328 break;
3329
458337db
TH
3330 default:
3331 BUG();
3332 }
3333
3334 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3335
3336 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3337 return -ENOENT;
3338
3339 if (!quiet) {
3340 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3341 snprintf(buf, sizeof(buf), "%s:%s",
3342 ata_mode_string(xfer_mask),
3343 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3344 else
3345 snprintf(buf, sizeof(buf), "%s",
3346 ata_mode_string(xfer_mask));
3347
3348 ata_dev_printk(dev, KERN_WARNING,
3349 "limiting speed to %s\n", buf);
3350 }
cf176e1a
TH
3351
3352 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3353 &dev->udma_mask);
3354
cf176e1a 3355 return 0;
cf176e1a
TH
3356}
3357
3373efd8 3358static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3359{
9af5c9c9 3360 struct ata_eh_context *ehc = &dev->link->eh_context;
4055dee7
TH
3361 const char *dev_err_whine = "";
3362 int ign_dev_err = 0;
83206a29
TH
3363 unsigned int err_mask;
3364 int rc;
1da177e4 3365
e8384607 3366 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3367 if (dev->xfer_shift == ATA_SHIFT_PIO)
3368 dev->flags |= ATA_DFLAG_PIO;
3369
3373efd8 3370 err_mask = ata_dev_set_xfermode(dev);
2dcb407e 3371
4055dee7
TH
3372 if (err_mask & ~AC_ERR_DEV)
3373 goto fail;
3374
3375 /* revalidate */
3376 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3377 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3378 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3379 if (rc)
3380 return rc;
3381
11750a40
A
3382 /* Old CFA may refuse this command, which is just fine */
3383 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
4055dee7 3384 ign_dev_err = 1;
2dcb407e 3385
0bc2a79a
AC
3386 /* Some very old devices and some bad newer ones fail any kind of
3387 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3388 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3389 dev->pio_mode <= XFER_PIO_2)
4055dee7 3390 ign_dev_err = 1;
2dcb407e 3391
3acaf94b
AC
3392 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3393 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3394 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3395 dev->dma_mode == XFER_MW_DMA_0 &&
3396 (dev->id[63] >> 8) & 1)
4055dee7 3397 ign_dev_err = 1;
3acaf94b 3398
4055dee7
TH
3399 /* if the device is actually configured correctly, ignore dev err */
3400 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3401 ign_dev_err = 1;
1da177e4 3402
4055dee7
TH
3403 if (err_mask & AC_ERR_DEV) {
3404 if (!ign_dev_err)
3405 goto fail;
3406 else
3407 dev_err_whine = " (device error ignored)";
3408 }
48a8a14f 3409
23e71c3d
TH
3410 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3411 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3412
4055dee7
TH
3413 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3414 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3415 dev_err_whine);
3416
83206a29 3417 return 0;
4055dee7
TH
3418
3419 fail:
3420 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3421 "(err_mask=0x%x)\n", err_mask);
3422 return -EIO;
1da177e4
LT
3423}
3424
1da177e4 3425/**
04351821 3426 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3427 * @link: link on which timings will be programmed
1967b7ff 3428 * @r_failed_dev: out parameter for failed device
1da177e4 3429 *
04351821
A
3430 * Standard implementation of the function used to tune and set
3431 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3432 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3433 * returned in @r_failed_dev.
780a87f7 3434 *
1da177e4 3435 * LOCKING:
0cba632b 3436 * PCI/etc. bus probe sem.
e82cbdb9
TH
3437 *
3438 * RETURNS:
3439 * 0 on success, negative errno otherwise
1da177e4 3440 */
04351821 3441
0260731f 3442int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3443{
0260731f 3444 struct ata_port *ap = link->ap;
e8e0619f 3445 struct ata_device *dev;
f58229f8 3446 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3447
a6d5a51c 3448 /* step 1: calculate xfer_mask */
f58229f8 3449 ata_link_for_each_dev(dev, link) {
7dc951ae 3450 unsigned long pio_mask, dma_mask;
b3a70601 3451 unsigned int mode_mask;
a6d5a51c 3452
e1211e3f 3453 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3454 continue;
3455
b3a70601
AC
3456 mode_mask = ATA_DMA_MASK_ATA;
3457 if (dev->class == ATA_DEV_ATAPI)
3458 mode_mask = ATA_DMA_MASK_ATAPI;
3459 else if (ata_id_is_cfa(dev->id))
3460 mode_mask = ATA_DMA_MASK_CFA;
3461
3373efd8 3462 ata_dev_xfermask(dev);
33267325 3463 ata_force_xfermask(dev);
1da177e4 3464
acf356b1
TH
3465 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3466 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3467
3468 if (libata_dma_mask & mode_mask)
3469 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3470 else
3471 dma_mask = 0;
3472
acf356b1
TH
3473 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3474 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3475
4f65977d 3476 found = 1;
70cd071e 3477 if (dev->dma_mode != 0xff)
5444a6f4 3478 used_dma = 1;
a6d5a51c 3479 }
4f65977d 3480 if (!found)
e82cbdb9 3481 goto out;
a6d5a51c
TH
3482
3483 /* step 2: always set host PIO timings */
f58229f8 3484 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3485 if (!ata_dev_enabled(dev))
3486 continue;
3487
70cd071e 3488 if (dev->pio_mode == 0xff) {
f15a1daf 3489 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3490 rc = -EINVAL;
e82cbdb9 3491 goto out;
e8e0619f
TH
3492 }
3493
3494 dev->xfer_mode = dev->pio_mode;
3495 dev->xfer_shift = ATA_SHIFT_PIO;
3496 if (ap->ops->set_piomode)
3497 ap->ops->set_piomode(ap, dev);
3498 }
1da177e4 3499
a6d5a51c 3500 /* step 3: set host DMA timings */
f58229f8 3501 ata_link_for_each_dev(dev, link) {
70cd071e 3502 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3503 continue;
3504
3505 dev->xfer_mode = dev->dma_mode;
3506 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3507 if (ap->ops->set_dmamode)
3508 ap->ops->set_dmamode(ap, dev);
3509 }
1da177e4
LT
3510
3511 /* step 4: update devices' xfer mode */
f58229f8 3512 ata_link_for_each_dev(dev, link) {
18d90deb 3513 /* don't update suspended devices' xfer mode */
9666f400 3514 if (!ata_dev_enabled(dev))
83206a29
TH
3515 continue;
3516
3373efd8 3517 rc = ata_dev_set_mode(dev);
5bbc53f4 3518 if (rc)
e82cbdb9 3519 goto out;
83206a29 3520 }
1da177e4 3521
e8e0619f
TH
3522 /* Record simplex status. If we selected DMA then the other
3523 * host channels are not permitted to do so.
5444a6f4 3524 */
cca3974e 3525 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3526 ap->host->simplex_claimed = ap;
5444a6f4 3527
e82cbdb9
TH
3528 out:
3529 if (rc)
3530 *r_failed_dev = dev;
3531 return rc;
1da177e4
LT
3532}
3533
1fdffbce
JG
3534/**
3535 * ata_tf_to_host - issue ATA taskfile to host controller
3536 * @ap: port to which command is being issued
3537 * @tf: ATA taskfile register set
3538 *
3539 * Issues ATA taskfile register set to ATA host controller,
3540 * with proper synchronization with interrupt handler and
3541 * other threads.
3542 *
3543 * LOCKING:
cca3974e 3544 * spin_lock_irqsave(host lock)
1fdffbce
JG
3545 */
3546
3547static inline void ata_tf_to_host(struct ata_port *ap,
3548 const struct ata_taskfile *tf)
3549{
3550 ap->ops->tf_load(ap, tf);
3551 ap->ops->exec_command(ap, tf);
3552}
3553
1da177e4
LT
3554/**
3555 * ata_busy_sleep - sleep until BSY clears, or timeout
3556 * @ap: port containing status register to be polled
3557 * @tmout_pat: impatience timeout
3558 * @tmout: overall timeout
3559 *
780a87f7
JG
3560 * Sleep until ATA Status register bit BSY clears,
3561 * or a timeout occurs.
3562 *
d1adc1bb
TH
3563 * LOCKING:
3564 * Kernel thread context (may sleep).
3565 *
3566 * RETURNS:
3567 * 0 on success, -errno otherwise.
1da177e4 3568 */
d1adc1bb
TH
3569int ata_busy_sleep(struct ata_port *ap,
3570 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3571{
3572 unsigned long timer_start, timeout;
3573 u8 status;
3574
3575 status = ata_busy_wait(ap, ATA_BUSY, 300);
3576 timer_start = jiffies;
3577 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3578 while (status != 0xff && (status & ATA_BUSY) &&
3579 time_before(jiffies, timeout)) {
1da177e4
LT
3580 msleep(50);
3581 status = ata_busy_wait(ap, ATA_BUSY, 3);
3582 }
3583
d1adc1bb 3584 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3585 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3586 "port is slow to respond, please be patient "
3587 "(Status 0x%x)\n", status);
1da177e4
LT
3588
3589 timeout = timer_start + tmout;
d1adc1bb
TH
3590 while (status != 0xff && (status & ATA_BUSY) &&
3591 time_before(jiffies, timeout)) {
1da177e4
LT
3592 msleep(50);
3593 status = ata_chk_status(ap);
3594 }
3595
d1adc1bb
TH
3596 if (status == 0xff)
3597 return -ENODEV;
3598
1da177e4 3599 if (status & ATA_BUSY) {
f15a1daf 3600 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3601 "(%lu secs, Status 0x%x)\n",
3602 tmout / HZ, status);
d1adc1bb 3603 return -EBUSY;
1da177e4
LT
3604 }
3605
3606 return 0;
3607}
3608
88ff6eaf
TH
3609/**
3610 * ata_wait_after_reset - wait before checking status after reset
3611 * @ap: port containing status register to be polled
3612 * @deadline: deadline jiffies for the operation
3613 *
3614 * After reset, we need to pause a while before reading status.
3615 * Also, certain combination of controller and device report 0xff
3616 * for some duration (e.g. until SATA PHY is up and running)
3617 * which is interpreted as empty port in ATA world. This
3618 * function also waits for such devices to get out of 0xff
3619 * status.
3620 *
3621 * LOCKING:
3622 * Kernel thread context (may sleep).
3623 */
3624void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3625{
3626 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3627
3628 if (time_before(until, deadline))
3629 deadline = until;
3630
3631 /* Spec mandates ">= 2ms" before checking status. We wait
3632 * 150ms, because that was the magic delay used for ATAPI
3633 * devices in Hale Landis's ATADRVR, for the period of time
3634 * between when the ATA command register is written, and then
3635 * status is checked. Because waiting for "a while" before
3636 * checking status is fine, post SRST, we perform this magic
3637 * delay here as well.
3638 *
3639 * Old drivers/ide uses the 2mS rule and then waits for ready.
3640 */
3641 msleep(150);
3642
3643 /* Wait for 0xff to clear. Some SATA devices take a long time
3644 * to clear 0xff after reset. For example, HHD424020F7SV00
3645 * iVDR needs >= 800ms while. Quantum GoVault needs even more
3646 * than that.
1974e201
TH
3647 *
3648 * Note that some PATA controllers (pata_ali) explode if
3649 * status register is read more than once when there's no
3650 * device attached.
88ff6eaf 3651 */
1974e201
TH
3652 if (ap->flags & ATA_FLAG_SATA) {
3653 while (1) {
3654 u8 status = ata_chk_status(ap);
88ff6eaf 3655
1974e201
TH
3656 if (status != 0xff || time_after(jiffies, deadline))
3657 return;
88ff6eaf 3658
1974e201
TH
3659 msleep(50);
3660 }
88ff6eaf
TH
3661 }
3662}
3663
d4b2bab4
TH
3664/**
3665 * ata_wait_ready - sleep until BSY clears, or timeout
3666 * @ap: port containing status register to be polled
3667 * @deadline: deadline jiffies for the operation
3668 *
3669 * Sleep until ATA Status register bit BSY clears, or timeout
3670 * occurs.
3671 *
3672 * LOCKING:
3673 * Kernel thread context (may sleep).
3674 *
3675 * RETURNS:
3676 * 0 on success, -errno otherwise.
3677 */
3678int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3679{
3680 unsigned long start = jiffies;
3681 int warned = 0;
3682
3683 while (1) {
3684 u8 status = ata_chk_status(ap);
3685 unsigned long now = jiffies;
3686
3687 if (!(status & ATA_BUSY))
3688 return 0;
936fd732 3689 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3690 return -ENODEV;
3691 if (time_after(now, deadline))
3692 return -EBUSY;
3693
3694 if (!warned && time_after(now, start + 5 * HZ) &&
3695 (deadline - now > 3 * HZ)) {
3696 ata_port_printk(ap, KERN_WARNING,
3697 "port is slow to respond, please be patient "
3698 "(Status 0x%x)\n", status);
3699 warned = 1;
3700 }
3701
3702 msleep(50);
3703 }
3704}
3705
3706static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3707 unsigned long deadline)
1da177e4
LT
3708{
3709 struct ata_ioports *ioaddr = &ap->ioaddr;
3710 unsigned int dev0 = devmask & (1 << 0);
3711 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3712 int rc, ret = 0;
1da177e4
LT
3713
3714 /* if device 0 was found in ata_devchk, wait for its
3715 * BSY bit to clear
3716 */
d4b2bab4
TH
3717 if (dev0) {
3718 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3719 if (rc) {
3720 if (rc != -ENODEV)
3721 return rc;
3722 ret = rc;
3723 }
d4b2bab4 3724 }
1da177e4 3725
e141d999
TH
3726 /* if device 1 was found in ata_devchk, wait for register
3727 * access briefly, then wait for BSY to clear.
1da177e4 3728 */
e141d999
TH
3729 if (dev1) {
3730 int i;
1da177e4
LT
3731
3732 ap->ops->dev_select(ap, 1);
e141d999
TH
3733
3734 /* Wait for register access. Some ATAPI devices fail
3735 * to set nsect/lbal after reset, so don't waste too
3736 * much time on it. We're gonna wait for !BSY anyway.
3737 */
3738 for (i = 0; i < 2; i++) {
3739 u8 nsect, lbal;
3740
3741 nsect = ioread8(ioaddr->nsect_addr);
3742 lbal = ioread8(ioaddr->lbal_addr);
3743 if ((nsect == 1) && (lbal == 1))
3744 break;
3745 msleep(50); /* give drive a breather */
3746 }
3747
d4b2bab4 3748 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3749 if (rc) {
3750 if (rc != -ENODEV)
3751 return rc;
3752 ret = rc;
3753 }
d4b2bab4 3754 }
1da177e4
LT
3755
3756 /* is all this really necessary? */
3757 ap->ops->dev_select(ap, 0);
3758 if (dev1)
3759 ap->ops->dev_select(ap, 1);
3760 if (dev0)
3761 ap->ops->dev_select(ap, 0);
d4b2bab4 3762
9b89391c 3763 return ret;
1da177e4
LT
3764}
3765
d4b2bab4
TH
3766static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3767 unsigned long deadline)
1da177e4
LT
3768{
3769 struct ata_ioports *ioaddr = &ap->ioaddr;
3770
44877b4e 3771 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3772
3773 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3774 iowrite8(ap->ctl, ioaddr->ctl_addr);
3775 udelay(20); /* FIXME: flush */
3776 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3777 udelay(20); /* FIXME: flush */
3778 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4 3779
88ff6eaf
TH
3780 /* wait a while before checking status */
3781 ata_wait_after_reset(ap, deadline);
1da177e4 3782
2e9edbf8 3783 /* Before we perform post reset processing we want to see if
298a41ca
TH
3784 * the bus shows 0xFF because the odd clown forgets the D7
3785 * pulldown resistor.
3786 */
150981b0 3787 if (ata_chk_status(ap) == 0xFF)
9b89391c 3788 return -ENODEV;
09c7ad79 3789
d4b2bab4 3790 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3791}
3792
3793/**
3794 * ata_bus_reset - reset host port and associated ATA channel
3795 * @ap: port to reset
3796 *
3797 * This is typically the first time we actually start issuing
3798 * commands to the ATA channel. We wait for BSY to clear, then
3799 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3800 * result. Determine what devices, if any, are on the channel
3801 * by looking at the device 0/1 error register. Look at the signature
3802 * stored in each device's taskfile registers, to determine if
3803 * the device is ATA or ATAPI.
3804 *
3805 * LOCKING:
0cba632b 3806 * PCI/etc. bus probe sem.
cca3974e 3807 * Obtains host lock.
1da177e4
LT
3808 *
3809 * SIDE EFFECTS:
198e0fed 3810 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3811 */
3812
3813void ata_bus_reset(struct ata_port *ap)
3814{
9af5c9c9 3815 struct ata_device *device = ap->link.device;
1da177e4
LT
3816 struct ata_ioports *ioaddr = &ap->ioaddr;
3817 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3818 u8 err;
aec5c3c1 3819 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3820 int rc;
1da177e4 3821
44877b4e 3822 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3823
3824 /* determine if device 0/1 are present */
3825 if (ap->flags & ATA_FLAG_SATA_RESET)
3826 dev0 = 1;
3827 else {
3828 dev0 = ata_devchk(ap, 0);
3829 if (slave_possible)
3830 dev1 = ata_devchk(ap, 1);
3831 }
3832
3833 if (dev0)
3834 devmask |= (1 << 0);
3835 if (dev1)
3836 devmask |= (1 << 1);
3837
3838 /* select device 0 again */
3839 ap->ops->dev_select(ap, 0);
3840
3841 /* issue bus reset */
9b89391c
TH
3842 if (ap->flags & ATA_FLAG_SRST) {
3843 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3844 if (rc && rc != -ENODEV)
aec5c3c1 3845 goto err_out;
9b89391c 3846 }
1da177e4
LT
3847
3848 /*
3849 * determine by signature whether we have ATA or ATAPI devices
3850 */
3f19859e 3851 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3852 if ((slave_possible) && (err != 0x81))
3f19859e 3853 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3854
1da177e4 3855 /* is double-select really necessary? */
9af5c9c9 3856 if (device[1].class != ATA_DEV_NONE)
1da177e4 3857 ap->ops->dev_select(ap, 1);
9af5c9c9 3858 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3859 ap->ops->dev_select(ap, 0);
3860
3861 /* if no devices were detected, disable this port */
9af5c9c9
TH
3862 if ((device[0].class == ATA_DEV_NONE) &&
3863 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3864 goto err_out;
3865
3866 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3867 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3868 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3869 }
3870
3871 DPRINTK("EXIT\n");
3872 return;
3873
3874err_out:
f15a1daf 3875 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3876 ata_port_disable(ap);
1da177e4
LT
3877
3878 DPRINTK("EXIT\n");
3879}
3880
d7bb4cc7 3881/**
936fd732
TH
3882 * sata_link_debounce - debounce SATA phy status
3883 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3884 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3885 * @deadline: deadline jiffies for the operation
d7bb4cc7 3886 *
936fd732 3887* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3888 * holding the same value where DET is not 1 for @duration polled
3889 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3890 * beginning of the stable state. Because DET gets stuck at 1 on
3891 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3892 * until timeout then returns 0 if DET is stable at 1.
3893 *
d4b2bab4
TH
3894 * @timeout is further limited by @deadline. The sooner of the
3895 * two is used.
3896 *
d7bb4cc7
TH
3897 * LOCKING:
3898 * Kernel thread context (may sleep)
3899 *
3900 * RETURNS:
3901 * 0 on success, -errno on failure.
3902 */
936fd732
TH
3903int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3904 unsigned long deadline)
7a7921e8 3905{
d7bb4cc7 3906 unsigned long interval_msec = params[0];
d4b2bab4
TH
3907 unsigned long duration = msecs_to_jiffies(params[1]);
3908 unsigned long last_jiffies, t;
d7bb4cc7
TH
3909 u32 last, cur;
3910 int rc;
3911
d4b2bab4
TH
3912 t = jiffies + msecs_to_jiffies(params[2]);
3913 if (time_before(t, deadline))
3914 deadline = t;
3915
936fd732 3916 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3917 return rc;
3918 cur &= 0xf;
3919
3920 last = cur;
3921 last_jiffies = jiffies;
3922
3923 while (1) {
3924 msleep(interval_msec);
936fd732 3925 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3926 return rc;
3927 cur &= 0xf;
3928
3929 /* DET stable? */
3930 if (cur == last) {
d4b2bab4 3931 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3932 continue;
3933 if (time_after(jiffies, last_jiffies + duration))
3934 return 0;
3935 continue;
3936 }
3937
3938 /* unstable, start over */
3939 last = cur;
3940 last_jiffies = jiffies;
3941
f1545154
TH
3942 /* Check deadline. If debouncing failed, return
3943 * -EPIPE to tell upper layer to lower link speed.
3944 */
d4b2bab4 3945 if (time_after(jiffies, deadline))
f1545154 3946 return -EPIPE;
d7bb4cc7
TH
3947 }
3948}
3949
3950/**
936fd732
TH
3951 * sata_link_resume - resume SATA link
3952 * @link: ATA link to resume SATA
d7bb4cc7 3953 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3954 * @deadline: deadline jiffies for the operation
d7bb4cc7 3955 *
936fd732 3956 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3957 *
3958 * LOCKING:
3959 * Kernel thread context (may sleep)
3960 *
3961 * RETURNS:
3962 * 0 on success, -errno on failure.
3963 */
936fd732
TH
3964int sata_link_resume(struct ata_link *link, const unsigned long *params,
3965 unsigned long deadline)
d7bb4cc7
TH
3966{
3967 u32 scontrol;
81952c54
TH
3968 int rc;
3969
936fd732 3970 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3971 return rc;
7a7921e8 3972
852ee16a 3973 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3974
936fd732 3975 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3976 return rc;
7a7921e8 3977
d7bb4cc7
TH
3978 /* Some PHYs react badly if SStatus is pounded immediately
3979 * after resuming. Delay 200ms before debouncing.
3980 */
3981 msleep(200);
7a7921e8 3982
936fd732 3983 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3984}
3985
f5914a46
TH
3986/**
3987 * ata_std_prereset - prepare for reset
cc0680a5 3988 * @link: ATA link to be reset
d4b2bab4 3989 * @deadline: deadline jiffies for the operation
f5914a46 3990 *
cc0680a5 3991 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3992 * prereset makes libata abort whole reset sequence and give up
3993 * that port, so prereset should be best-effort. It does its
3994 * best to prepare for reset sequence but if things go wrong, it
3995 * should just whine, not fail.
f5914a46
TH
3996 *
3997 * LOCKING:
3998 * Kernel thread context (may sleep)
3999 *
4000 * RETURNS:
4001 * 0 on success, -errno otherwise.
4002 */
cc0680a5 4003int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 4004{
cc0680a5 4005 struct ata_port *ap = link->ap;
936fd732 4006 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 4007 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
4008 int rc;
4009
f5914a46
TH
4010 /* if we're about to do hardreset, nothing more to do */
4011 if (ehc->i.action & ATA_EH_HARDRESET)
4012 return 0;
4013
936fd732 4014 /* if SATA, resume link */
a16abc0b 4015 if (ap->flags & ATA_FLAG_SATA) {
936fd732 4016 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
4017 /* whine about phy resume failure but proceed */
4018 if (rc && rc != -EOPNOTSUPP)
cc0680a5 4019 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 4020 "link for reset (errno=%d)\n", rc);
f5914a46
TH
4021 }
4022
8cebf274
TH
4023 /* wait for !BSY if we don't know that no device is attached */
4024 if (!ata_link_offline(link)) {
b8cffc6a 4025 rc = ata_wait_ready(ap, deadline);
6dffaf61 4026 if (rc && rc != -ENODEV) {
cc0680a5 4027 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
4028 "(errno=%d), forcing hardreset\n", rc);
4029 ehc->i.action |= ATA_EH_HARDRESET;
4030 }
4031 }
f5914a46
TH
4032
4033 return 0;
4034}
4035
c2bd5804
TH
4036/**
4037 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 4038 * @link: ATA link to reset
c2bd5804 4039 * @classes: resulting classes of attached devices
d4b2bab4 4040 * @deadline: deadline jiffies for the operation
c2bd5804 4041 *
52783c5d 4042 * Reset host port using ATA SRST.
c2bd5804
TH
4043 *
4044 * LOCKING:
4045 * Kernel thread context (may sleep)
4046 *
4047 * RETURNS:
4048 * 0 on success, -errno otherwise.
4049 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link -- nothing to reset, report no device */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices.
	 * err 0x81 from device 0 apparently indicates the slave cannot
	 * be classified -- TODO confirm against diagnostic-code spec.
	 */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
4095
4096/**
cc0680a5
TH
4097 * sata_link_hardreset - reset link via SATA phy reset
4098 * @link: link to reset
b6103f6d 4099 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 4100 * @deadline: deadline jiffies for the operation
c2bd5804 4101 *
cc0680a5 4102 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
4103 *
4104 * LOCKING:
4105 * Kernel thread context (may sleep)
4106 *
4107 * RETURNS:
4108 * 0 on success, -errno otherwise.
4109 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 0x4: disable the interface, putting the phy offline */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET = 0x1 requests COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	/* flush variant ensures the write is posted before we sleep */
	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4155
4156/**
4157 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 4158 * @link: link to reset
b6103f6d 4159 * @class: resulting class of attached device
d4b2bab4 4160 * @deadline: deadline jiffies for the operation
b6103f6d
TH
4161 *
4162 * SATA phy-reset host port using DET bits of SControl register,
4163 * wait for !BSY and classify the attached device.
4164 *
4165 * LOCKING:
4166 * Kernel thread context (may sleep)
4167 *
4168 * RETURNS:
4169 * 0 on success, -errno otherwise.
4170 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset (COMRESET via SControl) */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		/* nothing attached behind the phy */
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;		/* caller retries with softreset */
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
4223
4224/**
4225 * ata_std_postreset - standard postreset callback
cc0680a5 4226 * @link: the target ata_link
c2bd5804
TH
4227 * @classes: classes of attached devices
4228 *
4229 * This function is invoked after a successful reset. Note that
4230 * the device might have been reset more than once using
4231 * different reset methods before postreset is invoked.
c2bd5804 4232 *
c2bd5804
TH
4233 * LOCKING:
4234 * Kernel thread context (may sleep)
4235 */
cc0680a5 4236void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 4237{
cc0680a5 4238 struct ata_port *ap = link->ap;
dc2b3515
TH
4239 u32 serror;
4240
c2bd5804
TH
4241 DPRINTK("ENTER\n");
4242
c2bd5804 4243 /* print link status */
936fd732 4244 sata_print_link_status(link);
c2bd5804 4245
dc2b3515 4246 /* clear SError */
936fd732
TH
4247 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4248 sata_scr_write(link, SCR_ERROR, serror);
f7fe7ad4 4249 link->eh_info.serror = 0;
dc2b3515 4250
c2bd5804
TH
4251 /* is double-select really necessary? */
4252 if (classes[0] != ATA_DEV_NONE)
4253 ap->ops->dev_select(ap, 1);
4254 if (classes[1] != ATA_DEV_NONE)
4255 ap->ops->dev_select(ap, 0);
4256
3a39746a
TH
4257 /* bail out if no device is present */
4258 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4259 DPRINTK("EXIT, no device\n");
4260 return;
4261 }
4262
4263 /* set up device control */
0d5ff566
TH
4264 if (ap->ioaddr.ctl_addr)
4265 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
4266
4267 DPRINTK("EXIT\n");
4268}
4269
623a3128
TH
4270/**
4271 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4272 * @dev: device to compare against
4273 * @new_class: class of the new device
4274 * @new_id: IDENTIFY page of the new device
4275 *
4276 * Compare @new_class and @new_id against @dev and determine
4277 * whether @dev is the device indicated by @new_class and
4278 * @new_id.
4279 *
4280 * LOCKING:
4281 * None.
4282 *
4283 * RETURNS:
4284 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4285 */
3373efd8
TH
4286static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4287 const u16 *new_id)
623a3128
TH
4288{
4289 const u16 *old_id = dev->id;
a0cf733b
TH
4290 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4291 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4292
4293 if (dev->class != new_class) {
f15a1daf
TH
4294 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4295 dev->class, new_class);
623a3128
TH
4296 return 0;
4297 }
4298
a0cf733b
TH
4299 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4300 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4301 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4302 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4303
4304 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4305 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4306 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4307 return 0;
4308 }
4309
4310 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4311 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4312 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4313 return 0;
4314 }
4315
623a3128
TH
4316 return 1;
4317}
4318
4319/**
fe30911b 4320 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4321 * @dev: target ATA device
bff04647 4322 * @readid_flags: read ID flags
623a3128
TH
4323 *
4324 * Re-read IDENTIFY page and make sure @dev is still attached to
4325 * the port.
4326 *
4327 * LOCKING:
4328 * Kernel thread context (may sleep)
4329 *
4330 * RETURNS:
4331 * 0 on success, negative errno otherwise
4332 */
fe30911b 4333int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4334{
5eb45c02 4335 unsigned int class = dev->class;
9af5c9c9 4336 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4337 int rc;
4338
fe635c7e 4339 /* read ID data */
bff04647 4340 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4341 if (rc)
fe30911b 4342 return rc;
623a3128
TH
4343
4344 /* is the device still there? */
fe30911b
TH
4345 if (!ata_dev_same_device(dev, class, id))
4346 return -ENODEV;
623a3128 4347
fe635c7e 4348 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4349 return 0;
4350}
4351
4352/**
4353 * ata_dev_revalidate - Revalidate ATA device
4354 * @dev: device to revalidate
422c9daa 4355 * @new_class: new class code
fe30911b
TH
4356 * @readid_flags: read ID flags
4357 *
4358 * Re-read IDENTIFY page, make sure @dev is still attached to the
4359 * port and reconfigure it according to the new IDENTIFY page.
4360 *
4361 * LOCKING:
4362 * Kernel thread context (may sleep)
4363 *
4364 * RETURNS:
4365 * 0 on success, negative errno otherwise
4366 */
422c9daa
TH
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;	/* remembered to detect capacity change */
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4416
6919a0a6
AC
/* One quirk-table entry: matched against a device's IDENTIFY model
 * number and firmware revision by ata_dev_blacklisted().  A trailing
 * '*' in either pattern acts as a wildcard (see strn_pattern_cmp());
 * a NULL model_rev matches any revision.
 */
struct ata_blacklist_entry {
	const char *model_num;		/* IDENTIFY model string pattern */
	const char *model_rev;		/* firmware revision pattern, NULL == any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags to apply */
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 4503
741b7763 4504static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
539cc7c7
JG
4505{
4506 const char *p;
4507 int len;
4508
4509 /*
4510 * check for trailing wildcard: *\0
4511 */
4512 p = strchr(patt, wildchar);
4513 if (p && ((*(p + 1)) == 0))
4514 len = p - patt;
317b50b8 4515 else {
539cc7c7 4516 len = strlen(name);
317b50b8
AP
4517 if (!len) {
4518 if (!*patt)
4519 return 0;
4520 return -1;
4521 }
4522 }
539cc7c7
JG
4523
4524 return strncmp(patt, name, len);
4525}
4526
75683fe7 4527static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4528{
8bfa79fc
TH
4529 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4530 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4531 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4532
8bfa79fc
TH
4533 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4534 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4535
6919a0a6 4536 while (ad->model_num) {
539cc7c7 4537 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4538 if (ad->model_rev == NULL)
4539 return ad->horkage;
539cc7c7 4540 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4541 return ad->horkage;
f4b15fef 4542 }
6919a0a6 4543 ad++;
f4b15fef 4544 }
1da177e4
LT
4545 return 0;
4546}
4547
6919a0a6
AC
4548static int ata_dma_blacklisted(const struct ata_device *dev)
4549{
4550 /* We don't support polling DMA.
4551 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4552 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4553 */
9af5c9c9 4554 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4555 (dev->flags & ATA_DFLAG_CDB_INTR))
4556 return 1;
75683fe7 4557 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4558}
4559
6bbfd53d
AC
4560/**
4561 * ata_is_40wire - check drive side detection
4562 * @dev: device
4563 *
4564 * Perform drive side detection decoding, allowing for device vendors
4565 * who can't follow the documentation.
4566 */
4567
4568static int ata_is_40wire(struct ata_device *dev)
4569{
4570 if (dev->horkage & ATA_HORKAGE_IVB)
4571 return ata_drive_40wire_relaxed(dev->id);
4572 return ata_drive_40wire(dev->id);
4573}
4574
a6d5a51c
TH
4575/**
4576 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4577 * @dev: Device to compute xfermask for
4578 *
acf356b1
TH
4579 * Compute supported xfermask of @dev and store it in
4580 * dev->*_mask. This function is responsible for applying all
4581 * known limits including host controller limits, device
4582 * blacklist, etc...
a6d5a51c
TH
4583 *
4584 * LOCKING:
4585 * None.
a6d5a51c 4586 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available: intersect with per-device masks and
	 * what the IDENTIFY data advertises
	 */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	/* blacklisted devices lose all DMA modes */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may own DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a final say */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		    (ap->cbl == ATA_CBL_PATA_UNK ||
		     ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* write the filtered result back into the device's masks */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4655
1da177e4
LT
4656/**
4657 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4658 * @dev: Device to which command will be sent
4659 *
780a87f7
JG
4660 * Issue SET FEATURES - XFER MODE command to device @dev
4661 * on port @ap.
4662 *
1da177e4 4663 * LOCKING:
0cba632b 4664 * PCI/etc. bus probe sem.
83206a29
TH
4665 *
4666 * RETURNS:
4667 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4668 */
4669
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
9f45cbd3 4700/**
218f3d30 4701 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4702 * @dev: Device to which command will be sent
4703 * @enable: Whether to enable or disable the feature
218f3d30 4704 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4705 *
4706 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4707 * on port @ap with sector count
9f45cbd3
KCA
4708 *
4709 * LOCKING:
4710 * PCI/etc. bus probe sem.
4711 *
4712 * RETURNS:
4713 * 0 on success, AC_ERR_* mask otherwise.
4714 */
218f3d30
JG
4715static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4716 u8 feature)
9f45cbd3
KCA
4717{
4718 struct ata_taskfile tf;
4719 unsigned int err_mask;
4720
4721 /* set up set-features taskfile */
4722 DPRINTK("set features - SATA features\n");
4723
4724 ata_tf_init(dev, &tf);
4725 tf.command = ATA_CMD_SET_FEATURES;
4726 tf.feature = enable;
4727 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4728 tf.protocol = ATA_PROT_NODATA;
218f3d30 4729 tf.nsect = feature;
9f45cbd3 4730
2b789108 4731 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4732
83206a29
TH
4733 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4734 return err_mask;
1da177e4
LT
4735}
4736
8bf62ece
AL
4737/**
4738 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4739 * @dev: Device to which command will be sent
e2a7f77a
RD
4740 * @heads: Number of heads (taskfile parameter)
4741 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4742 *
4743 * LOCKING:
6aff8f1f
TH
4744 * Kernel thread context (may sleep)
4745 *
4746 * RETURNS:
4747 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4748 */
3373efd8
TH
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4779
1da177e4 4780/**
0cba632b
JG
4781 * ata_sg_clean - Unmap DMA memory associated with command
4782 * @qc: Command containing DMA memory to be released
4783 *
4784 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4785 *
4786 * LOCKING:
cca3974e 4787 * spin_lock_irqsave(host lock)
1da177e4 4788 */
70e6ad0c 4789void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4790{
4791 struct ata_port *ap = qc->ap;
ff2aeb1e 4792 struct scatterlist *sg = qc->sg;
1da177e4
LT
4793 int dir = qc->dma_dir;
4794
a4631474 4795 WARN_ON(sg == NULL);
1da177e4 4796
dde20207 4797 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4798
dde20207
JB
4799 if (qc->n_elem)
4800 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
1da177e4
LT
4801
4802 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4803 qc->sg = NULL;
1da177e4
LT
4804}
4805
4806/**
4807 * ata_fill_sg - Fill PCI IDE PRD table
4808 * @qc: Metadata associated with taskfile to be transferred
4809 *
780a87f7
JG
4810 * Fill PCI IDE PRD (scatter-gather) table with segments
4811 * associated with the current disk command.
4812 *
1da177e4 4813 * LOCKING:
cca3974e 4814 * spin_lock_irqsave(host lock)
1da177e4
LT
4815 *
4816 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;	/* si: sg index, pi: PRD index (may exceed si) */

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg segment at 64K boundaries; each piece
		 * becomes one PRD entry
		 */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4853
d26fc955
AC
4854/**
4855 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4856 * @qc: Metadata associated with taskfile to be transferred
4857 *
4858 * Fill PCI IDE PRD (scatter-gather) table with segments
4859 * associated with the current disk command. Perform the fill
4860 * so that we avoid writing any length 64K records for
4861 * controllers that don't follow the spec.
4862 *
4863 * LOCKING:
4864 * spin_lock_irqsave(host lock)
4865 *
4866 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;	/* si: sg index, pi: PRD index (may exceed si) */

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;	/* 0 would mean 64K per spec */
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says */
				/* emit two 32K entries instead of one 64K one */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4911
1da177e4
LT
4912/**
4913 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4914 * @qc: Metadata associated with taskfile to check
4915 *
780a87f7
JG
4916 * Allow low-level driver to filter ATA PACKET commands, returning
4917 * a status indicating whether or not it is OK to use DMA for the
4918 * supplied PACKET command.
4919 *
1da177e4 4920 * LOCKING:
cca3974e 4921 * spin_lock_irqsave(host lock)
0cba632b 4922 *
1da177e4
LT
4923 * RETURNS: 0 when ATAPI DMA can be used
4924 * nonzero otherwise
4925 */
4926int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4927{
4928 struct ata_port *ap = qc->ap;
b9a4197e
TH
4929
4930 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4931 * few ATAPI devices choke on such DMA requests.
4932 */
4933 if (unlikely(qc->nbytes & 15))
4934 return 1;
6f23a31d 4935
1da177e4 4936 if (ap->ops->check_atapi_dma)
b9a4197e 4937 return ap->ops->check_atapi_dma(qc);
1da177e4 4938
b9a4197e 4939 return 0;
1da177e4 4940}
b9a4197e 4941
31cc23b3
TH
4942/**
4943 * ata_std_qc_defer - Check whether a qc needs to be deferred
4944 * @qc: ATA command in question
4945 *
4946 * Non-NCQ commands cannot run with any other command, NCQ or
4947 * not. As upper layer only knows the queue depth, we are
4948 * responsible for maintaining exclusion. This function checks
4949 * whether a new command @qc can be issued.
4950 *
4951 * LOCKING:
4952 * spin_lock_irqsave(host lock)
4953 *
4954 * RETURNS:
4955 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4956 */
4957int ata_std_qc_defer(struct ata_queued_cmd *qc)
4958{
4959 struct ata_link *link = qc->dev->link;
4960
4961 if (qc->tf.protocol == ATA_PROT_NCQ) {
4962 if (!ata_tag_valid(link->active_tag))
4963 return 0;
4964 } else {
4965 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4966 return 0;
4967 }
4968
4969 return ATA_DEFER_LINK;
4970}
4971
1da177e4
LT
4972/**
4973 * ata_qc_prep - Prepare taskfile for submission
4974 * @qc: Metadata associated with taskfile to be prepared
4975 *
780a87f7
JG
4976 * Prepare ATA taskfile for submission.
4977 *
1da177e4 4978 * LOCKING:
cca3974e 4979 * spin_lock_irqsave(host lock)
1da177e4
LT
4980 */
4981void ata_qc_prep(struct ata_queued_cmd *qc)
4982{
4983 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4984 return;
4985
4986 ata_fill_sg(qc);
4987}
4988
d26fc955
AC
4989/**
4990 * ata_dumb_qc_prep - Prepare taskfile for submission
4991 * @qc: Metadata associated with taskfile to be prepared
4992 *
4993 * Prepare ATA taskfile for submission.
4994 *
4995 * LOCKING:
4996 * spin_lock_irqsave(host lock)
4997 */
4998void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4999{
5000 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
5001 return;
5002
5003 ata_fill_sg_dumb(qc);
5004}
5005
e46834cd
BK
5006void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
5007
0cba632b
JG
5008/**
5009 * ata_sg_init - Associate command with scatter-gather table.
5010 * @qc: Command to be associated
5011 * @sg: Scatter-gather table.
5012 * @n_elem: Number of elements in s/g table.
5013 *
5014 * Initialize the data-related elements of queued_cmd @qc
5015 * to point to a scatter-gather table @sg, containing @n_elem
5016 * elements.
5017 *
5018 * LOCKING:
cca3974e 5019 * spin_lock_irqsave(host lock)
0cba632b 5020 */
1da177e4
LT
5021void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
5022 unsigned int n_elem)
5023{
ff2aeb1e 5024 qc->sg = sg;
1da177e4 5025 qc->n_elem = n_elem;
ff2aeb1e 5026 qc->cursg = qc->sg;
1da177e4
LT
5027}
5028
ff2aeb1e
TH
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() may coalesce entries; 0 means mapping failed */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	/* record the (possibly smaller) mapped count and mark the qc
	 * so completion knows to unmap
	 */
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
5060
0baab86b 5061/**
c893a3ae 5062 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
5063 * @buf: Buffer to swap
5064 * @buf_words: Number of 16-bit words in buffer.
5065 *
5066 * Swap halves of 16-bit words if needed to convert from
5067 * little-endian byte order to native cpu byte order, or
5068 * vice-versa.
5069 *
5070 * LOCKING:
6f0ef4fa 5071 * Inherited from caller.
0baab86b 5072 */
1da177e4
LT
5073void swap_buf_le16(u16 *buf, unsigned int buf_words)
5074{
5075#ifdef __BIG_ENDIAN
5076 unsigned int i;
5077
5078 for (i = 0; i < buf_words; i++)
5079 buf[i] = le16_to_cpu(buf[i]);
5080#endif /* __BIG_ENDIAN */
5081}
5082
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
			   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing 1 byte, if any: pad to a full 16-bit word
	 * through a small aligned bounce buffer since the data register
	 * is 16 bits wide.
	 */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}

	/* return the number of bytes actually moved on the wire */
	return words << 1;
}
5128
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	/* keep local interrupts off for the whole PIO burst */
	local_irq_save(flags);
	consumed = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
5157
5158
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this sector is the last one -> move HSM to final state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* advance the sg cursor by one sector */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5214
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* transfer up to multi_count sectors, bounded by what
		 * remains of the command
		 */
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}
5243
c71c1857
AL
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* set the next HSM state according to the data phase that follows */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5279
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -1 if the device supplied more trailing data
 *	than the scatterlist can hold.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/* ran off the end of the scatterlist: record for EH */
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}
5359
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.  Reads the byte count
 *	and interrupt reason from the device, validates the data phase,
 *	and performs the transfer.  On any protocol violation the HSM
 *	is moved to HSM_ST_ERR for EH to handle.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	/* zero byte count is a protocol violation too */
	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5417
5418/**
c234fb00
AL
5419 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5420 * @ap: the target ata_port
5421 * @qc: qc on going
1da177e4 5422 *
c234fb00
AL
5423 * RETURNS:
5424 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5425 */
c234fb00
AL
5426
5427static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5428{
c234fb00
AL
5429 if (qc->tf.flags & ATA_TFLAG_POLLING)
5430 return 1;
1da177e4 5431
c234fb00
AL
5432 if (ap->hsm_task_state == HSM_ST_FIRST) {
5433 if (qc->tf.protocol == ATA_PROT_PIO &&
5434 (qc->tf.flags & ATA_TFLAG_WRITE))
5435 return 1;
1da177e4 5436
405e66b3 5437 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5438 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5439 return 1;
fe79e683
AL
5440 }
5441
c234fb00
AL
5442 return 0;
5443}
1da177e4 5444
c17ea20d
TH
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH: freeze the port on HSM violation */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable interrupts before handing
					 * the port back to the IRQ path
					 */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: just complete, re-enabling interrupts
		 * when called from the workqueue
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5494
bb5cb290
AL
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one or more steps,
 *	based on the device status read by the caller.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5736
/* Workqueue entry point that polls device status and drives the HSM
 * until the interrupt handler can take over (or the command finishes).
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: requeue ourselves after a short pause */
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5774
1da177e4
LT
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Allocated qc with its tag set, or NULL if the port is frozen
 *	or all tags are in use.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			/* atomically claimed tag i */
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
5805
5806/**
5807 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5808 * @dev: Device from whom we request an available command structure
5809 *
5810 * LOCKING:
0cba632b 5811 * None.
1da177e4
LT
5812 */
5813
3373efd8 5814struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5815{
9af5c9c9 5816 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5817 struct ata_queued_cmd *qc;
5818
5819 qc = ata_qc_new(ap);
5820 if (qc) {
1da177e4
LT
5821 qc->scsicmd = NULL;
5822 qc->ap = ap;
5823 qc->dev = dev;
1da177e4 5824
2c13b7ce 5825 ata_qc_reinit(qc);
1da177e4
LT
5826 }
5827
5828 return qc;
5829}
5830
1da177e4
LT
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing it so stale users trip */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
5855
/* Low-level qc completion: unmap DMA, clear active-tag bookkeeping and
 * exclusive-link status, then invoke the command's completion callback.
 * Caller holds the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5892
39599a53
TH
/* Snapshot the device's current taskfile into qc->result_tf, preserving
 * the flags of the issued taskfile so the result is decoded the same way.
 */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}
5900
00115e0f
TH
5901static void ata_verify_xfer(struct ata_queued_cmd *qc)
5902{
5903 struct ata_device *dev = qc->dev;
5904
5905 if (ata_tag_internal(qc->tag))
5906 return;
5907
5908 if (ata_is_nodata(qc->tf.protocol))
5909 return;
5910
5911 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5912 return;
5913
5914 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5915}
5916
f686bcb8
TH
5917/**
5918 * ata_qc_complete - Complete an active ATA command
5919 * @qc: Command to complete
5920 * @err_mask: ATA Status register contents
5921 *
5922 * Indicate to the mid and upper layers that an ATA
5923 * command has completed, with either an ok or not-ok status.
5924 *
5925 * LOCKING:
cca3974e 5926 * spin_lock_irqsave(host lock)
f686bcb8
TH
5927 */
5928void ata_qc_complete(struct ata_queued_cmd *qc)
5929{
5930 struct ata_port *ap = qc->ap;
5931
5932 /* XXX: New EH and old EH use different mechanisms to
5933 * synchronize EH with regular execution path.
5934 *
5935 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5936 * Normal execution path is responsible for not accessing a
5937 * failed qc. libata core enforces the rule by returning NULL
5938 * from ata_qc_from_tag() for failed qcs.
5939 *
5940 * Old EH depends on ata_qc_complete() nullifying completion
5941 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5942 * not synchronize with interrupt handler. Only PIO task is
5943 * taken care of.
5944 */
5945 if (ap->ops->error_handler) {
4dbfa39b
TH
5946 struct ata_device *dev = qc->dev;
5947 struct ata_eh_info *ehi = &dev->link->eh_info;
5948
b51e9e5d 5949 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5950
5951 if (unlikely(qc->err_mask))
5952 qc->flags |= ATA_QCFLAG_FAILED;
5953
5954 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5955 if (!ata_tag_internal(qc->tag)) {
5956 /* always fill result TF for failed qc */
39599a53 5957 fill_result_tf(qc);
f686bcb8
TH
5958 ata_qc_schedule_eh(qc);
5959 return;
5960 }
5961 }
5962
5963 /* read result TF if requested */
5964 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5965 fill_result_tf(qc);
f686bcb8 5966
4dbfa39b
TH
5967 /* Some commands need post-processing after successful
5968 * completion.
5969 */
5970 switch (qc->tf.command) {
5971 case ATA_CMD_SET_FEATURES:
5972 if (qc->tf.feature != SETFEATURES_WC_ON &&
5973 qc->tf.feature != SETFEATURES_WC_OFF)
5974 break;
5975 /* fall through */
5976 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5977 case ATA_CMD_SET_MULTI: /* multi_count changed */
5978 /* revalidate device */
5979 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5980 ata_port_schedule_eh(ap);
5981 break;
054a5fba
TH
5982
5983 case ATA_CMD_SLEEP:
5984 dev->flags |= ATA_DFLAG_SLEEPING;
5985 break;
4dbfa39b
TH
5986 }
5987
00115e0f
TH
5988 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5989 ata_verify_xfer(qc);
5990
f686bcb8
TH
5991 __ata_qc_complete(qc);
5992 } else {
5993 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5994 return;
5995
5996 /* read result TF if failed or requested */
5997 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5998 fill_result_tf(qc);
f686bcb8
TH
5999
6000 __ata_qc_complete(qc);
6001 }
6002}
6003
dedaf2b0
TH
6004/**
6005 * ata_qc_complete_multiple - Complete multiple qcs successfully
6006 * @ap: port in question
6007 * @qc_active: new qc_active mask
6008 * @finish_qc: LLDD callback invoked before completing a qc
6009 *
6010 * Complete in-flight commands. This functions is meant to be
6011 * called from low-level driver's interrupt routine to complete
6012 * requests normally. ap->qc_active and @qc_active is compared
6013 * and commands are completed accordingly.
6014 *
6015 * LOCKING:
cca3974e 6016 * spin_lock_irqsave(host lock)
dedaf2b0
TH
6017 *
6018 * RETURNS:
6019 * Number of completed commands on success, -errno otherwise.
6020 */
6021int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
6022 void (*finish_qc)(struct ata_queued_cmd *))
6023{
6024 int nr_done = 0;
6025 u32 done_mask;
6026 int i;
6027
6028 done_mask = ap->qc_active ^ qc_active;
6029
6030 if (unlikely(done_mask & qc_active)) {
6031 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
6032 "(%08x->%08x)\n", ap->qc_active, qc_active);
6033 return -EINVAL;
6034 }
6035
6036 for (i = 0; i < ATA_MAX_QUEUE; i++) {
6037 struct ata_queued_cmd *qc;
6038
6039 if (!(done_mask & (1 << i)))
6040 continue;
6041
6042 if ((qc = ata_qc_from_tag(ap, i))) {
6043 if (finish_qc)
6044 finish_qc(qc);
6045 ata_qc_complete(qc);
6046 nr_done++;
6047 }
6048 }
6049
6050 return nr_done;
6051}
6052
1da177e4
LT
6053/**
6054 * ata_qc_issue - issue taskfile to device
6055 * @qc: command to issue to device
6056 *
6057 * Prepare an ATA command to submission to device.
6058 * This includes mapping the data into a DMA-able
6059 * area, filling in the S/G table, and finally
6060 * writing the taskfile to hardware, starting the command.
6061 *
6062 * LOCKING:
cca3974e 6063 * spin_lock_irqsave(host lock)
1da177e4 6064 */
8e0e694a 6065void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
6066{
6067 struct ata_port *ap = qc->ap;
9af5c9c9 6068 struct ata_link *link = qc->dev->link;
405e66b3 6069 u8 prot = qc->tf.protocol;
1da177e4 6070
dedaf2b0
TH
6071 /* Make sure only one non-NCQ command is outstanding. The
6072 * check is skipped for old EH because it reuses active qc to
6073 * request ATAPI sense.
6074 */
9af5c9c9 6075 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 6076
1973a023 6077 if (ata_is_ncq(prot)) {
9af5c9c9 6078 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
6079
6080 if (!link->sactive)
6081 ap->nr_active_links++;
9af5c9c9 6082 link->sactive |= 1 << qc->tag;
dedaf2b0 6083 } else {
9af5c9c9 6084 WARN_ON(link->sactive);
da917d69
TH
6085
6086 ap->nr_active_links++;
9af5c9c9 6087 link->active_tag = qc->tag;
dedaf2b0
TH
6088 }
6089
e4a70e76 6090 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 6091 ap->qc_active |= 1 << qc->tag;
e4a70e76 6092
f92a2636
TH
6093 /* We guarantee to LLDs that they will have at least one
6094 * non-zero sg if the command is a data command.
6095 */
ff2aeb1e 6096 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
f92a2636 6097
405e66b3 6098 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 6099 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7
TH
6100 if (ata_sg_setup(qc))
6101 goto sg_err;
1da177e4 6102
cf480626 6103 /* if device is sleeping, schedule reset and abort the link */
054a5fba 6104 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
cf480626 6105 link->eh_info.action |= ATA_EH_RESET;
054a5fba
TH
6106 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6107 ata_link_abort(link);
6108 return;
6109 }
6110
1da177e4
LT
6111 ap->ops->qc_prep(qc);
6112
8e0e694a
TH
6113 qc->err_mask |= ap->ops->qc_issue(qc);
6114 if (unlikely(qc->err_mask))
6115 goto err;
6116 return;
1da177e4 6117
8e436af9 6118sg_err:
8e0e694a
TH
6119 qc->err_mask |= AC_ERR_SYSTEM;
6120err:
6121 ata_qc_complete(qc);
1da177e4
LT
6122}
6123
6124/**
6125 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6126 * @qc: command to issue to device
6127 *
6128 * Using various libata functions and hooks, this function
6129 * starts an ATA command. ATA commands are grouped into
6130 * classes called "protocols", and issuing each type of protocol
6131 * is slightly different.
6132 *
0baab86b
EF
6133 * May be used as the qc_issue() entry in ata_port_operations.
6134 *
1da177e4 6135 * LOCKING:
cca3974e 6136 * spin_lock_irqsave(host lock)
1da177e4
LT
6137 *
6138 * RETURNS:
9a3d9eb0 6139 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6140 */
6141
9a3d9eb0 6142unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6143{
6144 struct ata_port *ap = qc->ap;
6145
e50362ec
AL
6146 /* Use polling pio if the LLD doesn't handle
6147 * interrupt driven pio and atapi CDB interrupt.
6148 */
6149 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6150 switch (qc->tf.protocol) {
6151 case ATA_PROT_PIO:
e3472cbe 6152 case ATA_PROT_NODATA:
0dc36888
TH
6153 case ATAPI_PROT_PIO:
6154 case ATAPI_PROT_NODATA:
e50362ec
AL
6155 qc->tf.flags |= ATA_TFLAG_POLLING;
6156 break;
0dc36888 6157 case ATAPI_PROT_DMA:
e50362ec 6158 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6159 /* see ata_dma_blacklisted() */
e50362ec
AL
6160 BUG();
6161 break;
6162 default:
6163 break;
6164 }
6165 }
6166
312f7da2 6167 /* select the device */
1da177e4
LT
6168 ata_dev_select(ap, qc->dev->devno, 1, 0);
6169
312f7da2 6170 /* start the command */
1da177e4
LT
6171 switch (qc->tf.protocol) {
6172 case ATA_PROT_NODATA:
312f7da2
AL
6173 if (qc->tf.flags & ATA_TFLAG_POLLING)
6174 ata_qc_set_polling(qc);
6175
e5338254 6176 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6177 ap->hsm_task_state = HSM_ST_LAST;
6178
6179 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6180 ata_pio_queue_task(ap, qc, 0);
312f7da2 6181
1da177e4
LT
6182 break;
6183
6184 case ATA_PROT_DMA:
587005de 6185 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6186
1da177e4
LT
6187 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6188 ap->ops->bmdma_setup(qc); /* set up bmdma */
6189 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6190 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6191 break;
6192
312f7da2
AL
6193 case ATA_PROT_PIO:
6194 if (qc->tf.flags & ATA_TFLAG_POLLING)
6195 ata_qc_set_polling(qc);
1da177e4 6196
e5338254 6197 ata_tf_to_host(ap, &qc->tf);
312f7da2 6198
54f00389
AL
6199 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6200 /* PIO data out protocol */
6201 ap->hsm_task_state = HSM_ST_FIRST;
442eacc3 6202 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6203
6204 /* always send first data block using
e27486db 6205 * the ata_pio_task() codepath.
54f00389 6206 */
312f7da2 6207 } else {
54f00389
AL
6208 /* PIO data in protocol */
6209 ap->hsm_task_state = HSM_ST;
6210
6211 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6212 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6213
6214 /* if polling, ata_pio_task() handles the rest.
6215 * otherwise, interrupt handler takes over from here.
6216 */
312f7da2
AL
6217 }
6218
1da177e4
LT
6219 break;
6220
0dc36888
TH
6221 case ATAPI_PROT_PIO:
6222 case ATAPI_PROT_NODATA:
312f7da2
AL
6223 if (qc->tf.flags & ATA_TFLAG_POLLING)
6224 ata_qc_set_polling(qc);
6225
e5338254 6226 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6227
312f7da2
AL
6228 ap->hsm_task_state = HSM_ST_FIRST;
6229
6230 /* send cdb by polling if no cdb interrupt */
6231 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6232 (qc->tf.flags & ATA_TFLAG_POLLING))
442eacc3 6233 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6234 break;
6235
0dc36888 6236 case ATAPI_PROT_DMA:
587005de 6237 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6238
1da177e4
LT
6239 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6240 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6241 ap->hsm_task_state = HSM_ST_FIRST;
6242
6243 /* send cdb by polling if no cdb interrupt */
6244 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
442eacc3 6245 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6246 break;
6247
6248 default:
6249 WARN_ON(1);
9a3d9eb0 6250 return AC_ERR_SYSTEM;
1da177e4
LT
6251 }
6252
6253 return 0;
6254}
6255
1da177e4
LT
6256/**
6257 * ata_host_intr - Handle host interrupt for given (port, task)
6258 * @ap: Port on which interrupt arrived (possibly...)
6259 * @qc: Taskfile currently active in engine
6260 *
6261 * Handle host interrupt for given queued command. Currently,
6262 * only DMA interrupts are handled. All other commands are
6263 * handled via polling with interrupts disabled (nIEN bit).
6264 *
6265 * LOCKING:
cca3974e 6266 * spin_lock_irqsave(host lock)
1da177e4
LT
6267 *
6268 * RETURNS:
6269 * One if interrupt was handled, zero if not (shared irq).
6270 */
6271
2dcb407e
JG
6272inline unsigned int ata_host_intr(struct ata_port *ap,
6273 struct ata_queued_cmd *qc)
1da177e4 6274{
9af5c9c9 6275 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6276 u8 status, host_stat = 0;
1da177e4 6277
312f7da2 6278 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6279 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6280
312f7da2
AL
6281 /* Check whether we are expecting interrupt in this state */
6282 switch (ap->hsm_task_state) {
6283 case HSM_ST_FIRST:
6912ccd5
AL
6284 /* Some pre-ATAPI-4 devices assert INTRQ
6285 * at this state when ready to receive CDB.
6286 */
1da177e4 6287
312f7da2 6288 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
405e66b3
TH
6289 * The flag was turned on only for atapi devices. No
6290 * need to check ata_is_atapi(qc->tf.protocol) again.
312f7da2
AL
6291 */
6292 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6293 goto idle_irq;
1da177e4 6294 break;
312f7da2
AL
6295 case HSM_ST_LAST:
6296 if (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6297 qc->tf.protocol == ATAPI_PROT_DMA) {
312f7da2
AL
6298 /* check status of DMA engine */
6299 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6300 VPRINTK("ata%u: host_stat 0x%X\n",
6301 ap->print_id, host_stat);
312f7da2
AL
6302
6303 /* if it's not our irq... */
6304 if (!(host_stat & ATA_DMA_INTR))
6305 goto idle_irq;
6306
6307 /* before we do anything else, clear DMA-Start bit */
6308 ap->ops->bmdma_stop(qc);
a4f16610
AL
6309
6310 if (unlikely(host_stat & ATA_DMA_ERR)) {
6311 /* error when transfering data to/from memory */
6312 qc->err_mask |= AC_ERR_HOST_BUS;
6313 ap->hsm_task_state = HSM_ST_ERR;
6314 }
312f7da2
AL
6315 }
6316 break;
6317 case HSM_ST:
6318 break;
1da177e4
LT
6319 default:
6320 goto idle_irq;
6321 }
6322
312f7da2
AL
6323 /* check altstatus */
6324 status = ata_altstatus(ap);
6325 if (status & ATA_BUSY)
6326 goto idle_irq;
1da177e4 6327
312f7da2
AL
6328 /* check main status, clearing INTRQ */
6329 status = ata_chk_status(ap);
6330 if (unlikely(status & ATA_BUSY))
6331 goto idle_irq;
1da177e4 6332
312f7da2
AL
6333 /* ack bmdma irq events */
6334 ap->ops->irq_clear(ap);
1da177e4 6335
bb5cb290 6336 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6337
6338 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6339 qc->tf.protocol == ATAPI_PROT_DMA))
ea54763f
TH
6340 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6341
1da177e4
LT
6342 return 1; /* irq handled */
6343
6344idle_irq:
6345 ap->stats.idle_irq++;
6346
6347#ifdef ATA_IRQ_TRAP
6348 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6349 ata_chk_status(ap);
6350 ap->ops->irq_clear(ap);
f15a1daf 6351 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6352 return 1;
1da177e4
LT
6353 }
6354#endif
6355 return 0; /* irq not handled */
6356}
6357
6358/**
6359 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6360 * @irq: irq line (unused)
cca3974e 6361 * @dev_instance: pointer to our ata_host information structure
1da177e4 6362 *
0cba632b
JG
6363 * Default interrupt handler for PCI IDE devices. Calls
6364 * ata_host_intr() for each port that is not disabled.
6365 *
1da177e4 6366 * LOCKING:
cca3974e 6367 * Obtains host lock during operation.
1da177e4
LT
6368 *
6369 * RETURNS:
0cba632b 6370 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6371 */
6372
2dcb407e 6373irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6374{
cca3974e 6375 struct ata_host *host = dev_instance;
1da177e4
LT
6376 unsigned int i;
6377 unsigned int handled = 0;
6378 unsigned long flags;
6379
6380 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6381 spin_lock_irqsave(&host->lock, flags);
1da177e4 6382
cca3974e 6383 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6384 struct ata_port *ap;
6385
cca3974e 6386 ap = host->ports[i];
c1389503 6387 if (ap &&
029f5468 6388 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6389 struct ata_queued_cmd *qc;
6390
9af5c9c9 6391 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6392 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6393 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6394 handled |= ata_host_intr(ap, qc);
6395 }
6396 }
6397
cca3974e 6398 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6399
6400 return IRQ_RETVAL(handled);
6401}
6402
34bf2170
TH
6403/**
6404 * sata_scr_valid - test whether SCRs are accessible
936fd732 6405 * @link: ATA link to test SCR accessibility for
34bf2170 6406 *
936fd732 6407 * Test whether SCRs are accessible for @link.
34bf2170
TH
6408 *
6409 * LOCKING:
6410 * None.
6411 *
6412 * RETURNS:
6413 * 1 if SCRs are accessible, 0 otherwise.
6414 */
936fd732 6415int sata_scr_valid(struct ata_link *link)
34bf2170 6416{
936fd732
TH
6417 struct ata_port *ap = link->ap;
6418
a16abc0b 6419 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6420}
6421
6422/**
6423 * sata_scr_read - read SCR register of the specified port
936fd732 6424 * @link: ATA link to read SCR for
34bf2170
TH
6425 * @reg: SCR to read
6426 * @val: Place to store read value
6427 *
936fd732 6428 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6429 * guaranteed to succeed if @link is ap->link, the cable type of
6430 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6431 *
6432 * LOCKING:
633273a3 6433 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6434 *
6435 * RETURNS:
6436 * 0 on success, negative errno on failure.
6437 */
936fd732 6438int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6439{
633273a3
TH
6440 if (ata_is_host_link(link)) {
6441 struct ata_port *ap = link->ap;
936fd732 6442
633273a3
TH
6443 if (sata_scr_valid(link))
6444 return ap->ops->scr_read(ap, reg, val);
6445 return -EOPNOTSUPP;
6446 }
6447
6448 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6449}
6450
6451/**
6452 * sata_scr_write - write SCR register of the specified port
936fd732 6453 * @link: ATA link to write SCR for
34bf2170
TH
6454 * @reg: SCR to write
6455 * @val: value to write
6456 *
936fd732 6457 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6458 * guaranteed to succeed if @link is ap->link, the cable type of
6459 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6460 *
6461 * LOCKING:
633273a3 6462 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6463 *
6464 * RETURNS:
6465 * 0 on success, negative errno on failure.
6466 */
936fd732 6467int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6468{
633273a3
TH
6469 if (ata_is_host_link(link)) {
6470 struct ata_port *ap = link->ap;
6471
6472 if (sata_scr_valid(link))
6473 return ap->ops->scr_write(ap, reg, val);
6474 return -EOPNOTSUPP;
6475 }
936fd732 6476
633273a3 6477 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6478}
6479
6480/**
6481 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6482 * @link: ATA link to write SCR for
34bf2170
TH
6483 * @reg: SCR to write
6484 * @val: value to write
6485 *
6486 * This function is identical to sata_scr_write() except that this
6487 * function performs flush after writing to the register.
6488 *
6489 * LOCKING:
633273a3 6490 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6491 *
6492 * RETURNS:
6493 * 0 on success, negative errno on failure.
6494 */
936fd732 6495int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6496{
633273a3
TH
6497 if (ata_is_host_link(link)) {
6498 struct ata_port *ap = link->ap;
6499 int rc;
da3dbb17 6500
633273a3
TH
6501 if (sata_scr_valid(link)) {
6502 rc = ap->ops->scr_write(ap, reg, val);
6503 if (rc == 0)
6504 rc = ap->ops->scr_read(ap, reg, &val);
6505 return rc;
6506 }
6507 return -EOPNOTSUPP;
34bf2170 6508 }
633273a3
TH
6509
6510 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6511}
6512
6513/**
936fd732
TH
6514 * ata_link_online - test whether the given link is online
6515 * @link: ATA link to test
34bf2170 6516 *
936fd732
TH
6517 * Test whether @link is online. Note that this function returns
6518 * 0 if online status of @link cannot be obtained, so
6519 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6520 *
6521 * LOCKING:
6522 * None.
6523 *
6524 * RETURNS:
6525 * 1 if the port online status is available and online.
6526 */
936fd732 6527int ata_link_online(struct ata_link *link)
34bf2170
TH
6528{
6529 u32 sstatus;
6530
936fd732
TH
6531 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6532 (sstatus & 0xf) == 0x3)
34bf2170
TH
6533 return 1;
6534 return 0;
6535}
6536
6537/**
936fd732
TH
6538 * ata_link_offline - test whether the given link is offline
6539 * @link: ATA link to test
34bf2170 6540 *
936fd732
TH
6541 * Test whether @link is offline. Note that this function
6542 * returns 0 if offline status of @link cannot be obtained, so
6543 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6544 *
6545 * LOCKING:
6546 * None.
6547 *
6548 * RETURNS:
6549 * 1 if the port offline status is available and offline.
6550 */
936fd732 6551int ata_link_offline(struct ata_link *link)
34bf2170
TH
6552{
6553 u32 sstatus;
6554
936fd732
TH
6555 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6556 (sstatus & 0xf) != 0x3)
34bf2170
TH
6557 return 1;
6558 return 0;
6559}
0baab86b 6560
77b08fb5 6561int ata_flush_cache(struct ata_device *dev)
9b847548 6562{
977e6b9f 6563 unsigned int err_mask;
9b847548
JA
6564 u8 cmd;
6565
6566 if (!ata_try_flush_cache(dev))
6567 return 0;
6568
6fc49adb 6569 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6570 cmd = ATA_CMD_FLUSH_EXT;
6571 else
6572 cmd = ATA_CMD_FLUSH;
6573
4f34337b
AC
6574 /* This is wrong. On a failed flush we get back the LBA of the lost
6575 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6576 a further flush command to continue the writeback until it
4f34337b 6577 does not error */
977e6b9f
TH
6578 err_mask = ata_do_simple_cmd(dev, cmd);
6579 if (err_mask) {
6580 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6581 return -EIO;
6582 }
6583
6584 return 0;
9b847548
JA
6585}
6586
6ffa01d8 6587#ifdef CONFIG_PM
cca3974e
JG
6588static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6589 unsigned int action, unsigned int ehi_flags,
6590 int wait)
500530f6
TH
6591{
6592 unsigned long flags;
6593 int i, rc;
6594
cca3974e
JG
6595 for (i = 0; i < host->n_ports; i++) {
6596 struct ata_port *ap = host->ports[i];
e3667ebf 6597 struct ata_link *link;
500530f6
TH
6598
6599 /* Previous resume operation might still be in
6600 * progress. Wait for PM_PENDING to clear.
6601 */
6602 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6603 ata_port_wait_eh(ap);
6604 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6605 }
6606
6607 /* request PM ops to EH */
6608 spin_lock_irqsave(ap->lock, flags);
6609
6610 ap->pm_mesg = mesg;
6611 if (wait) {
6612 rc = 0;
6613 ap->pm_result = &rc;
6614 }
6615
6616 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6617 __ata_port_for_each_link(link, ap) {
6618 link->eh_info.action |= action;
6619 link->eh_info.flags |= ehi_flags;
6620 }
500530f6
TH
6621
6622 ata_port_schedule_eh(ap);
6623
6624 spin_unlock_irqrestore(ap->lock, flags);
6625
6626 /* wait and check result */
6627 if (wait) {
6628 ata_port_wait_eh(ap);
6629 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6630 if (rc)
6631 return rc;
6632 }
6633 }
6634
6635 return 0;
6636}
6637
6638/**
cca3974e
JG
6639 * ata_host_suspend - suspend host
6640 * @host: host to suspend
500530f6
TH
6641 * @mesg: PM message
6642 *
cca3974e 6643 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6644 * function requests EH to perform PM operations and waits for EH
6645 * to finish.
6646 *
6647 * LOCKING:
6648 * Kernel thread context (may sleep).
6649 *
6650 * RETURNS:
6651 * 0 on success, -errno on failure.
6652 */
cca3974e 6653int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6654{
9666f400 6655 int rc;
500530f6 6656
ca77329f
KCA
6657 /*
6658 * disable link pm on all ports before requesting
6659 * any pm activity
6660 */
6661 ata_lpm_enable(host);
6662
cca3974e 6663 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
6664 if (rc == 0)
6665 host->dev->power.power_state = mesg;
500530f6
TH
6666 return rc;
6667}
6668
6669/**
cca3974e
JG
6670 * ata_host_resume - resume host
6671 * @host: host to resume
500530f6 6672 *
cca3974e 6673 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6674 * function requests EH to perform PM operations and returns.
6675 * Note that all resume operations are performed parallely.
6676 *
6677 * LOCKING:
6678 * Kernel thread context (may sleep).
6679 */
cca3974e 6680void ata_host_resume(struct ata_host *host)
500530f6 6681{
cf480626 6682 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
cca3974e 6683 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 6684 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6685
6686 /* reenable link pm */
6687 ata_lpm_disable(host);
500530f6 6688}
6ffa01d8 6689#endif
500530f6 6690
c893a3ae
RD
6691/**
6692 * ata_port_start - Set port up for dma.
6693 * @ap: Port to initialize
6694 *
6695 * Called just after data structures for each port are
6696 * initialized. Allocates space for PRD table.
6697 *
6698 * May be used as the port_start() entry in ata_port_operations.
6699 *
6700 * LOCKING:
6701 * Inherited from caller.
6702 */
f0d36efd 6703int ata_port_start(struct ata_port *ap)
1da177e4 6704{
2f1f610b 6705 struct device *dev = ap->dev;
1da177e4 6706
f0d36efd
TH
6707 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6708 GFP_KERNEL);
1da177e4
LT
6709 if (!ap->prd)
6710 return -ENOMEM;
6711
1da177e4
LT
6712 return 0;
6713}
6714
3ef3b43d
TH
6715/**
6716 * ata_dev_init - Initialize an ata_device structure
6717 * @dev: Device structure to initialize
6718 *
6719 * Initialize @dev in preparation for probing.
6720 *
6721 * LOCKING:
6722 * Inherited from caller.
6723 */
6724void ata_dev_init(struct ata_device *dev)
6725{
9af5c9c9
TH
6726 struct ata_link *link = dev->link;
6727 struct ata_port *ap = link->ap;
72fa4b74
TH
6728 unsigned long flags;
6729
5a04bf4b 6730 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6731 link->sata_spd_limit = link->hw_sata_spd_limit;
6732 link->sata_spd = 0;
5a04bf4b 6733
72fa4b74
TH
6734 /* High bits of dev->flags are used to record warm plug
6735 * requests which occur asynchronously. Synchronize using
cca3974e 6736 * host lock.
72fa4b74 6737 */
ba6a1308 6738 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6739 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6740 dev->horkage = 0;
ba6a1308 6741 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6742
72fa4b74
TH
6743 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6744 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6745 dev->pio_mask = UINT_MAX;
6746 dev->mwdma_mask = UINT_MAX;
6747 dev->udma_mask = UINT_MAX;
6748}
6749
4fb37a25
TH
6750/**
6751 * ata_link_init - Initialize an ata_link structure
6752 * @ap: ATA port link is attached to
6753 * @link: Link structure to initialize
8989805d 6754 * @pmp: Port multiplier port number
4fb37a25
TH
6755 *
6756 * Initialize @link.
6757 *
6758 * LOCKING:
6759 * Kernel thread context (may sleep)
6760 */
fb7fd614 6761void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6762{
6763 int i;
6764
6765 /* clear everything except for devices */
6766 memset(link, 0, offsetof(struct ata_link, device[0]));
6767
6768 link->ap = ap;
8989805d 6769 link->pmp = pmp;
4fb37a25
TH
6770 link->active_tag = ATA_TAG_POISON;
6771 link->hw_sata_spd_limit = UINT_MAX;
6772
6773 /* can't use iterator, ap isn't initialized yet */
6774 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6775 struct ata_device *dev = &link->device[i];
6776
6777 dev->link = link;
6778 dev->devno = dev - link->device;
6779 ata_dev_init(dev);
6780 }
6781}
6782
6783/**
6784 * sata_link_init_spd - Initialize link->sata_spd_limit
6785 * @link: Link to configure sata_spd_limit for
6786 *
6787 * Initialize @link->[hw_]sata_spd_limit to the currently
6788 * configured value.
6789 *
6790 * LOCKING:
6791 * Kernel thread context (may sleep).
6792 *
6793 * RETURNS:
6794 * 0 on success, -errno on failure.
6795 */
fb7fd614 6796int sata_link_init_spd(struct ata_link *link)
4fb37a25 6797{
33267325
TH
6798 u32 scontrol;
6799 u8 spd;
4fb37a25
TH
6800 int rc;
6801
6802 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6803 if (rc)
6804 return rc;
6805
6806 spd = (scontrol >> 4) & 0xf;
6807 if (spd)
6808 link->hw_sata_spd_limit &= (1 << spd) - 1;
6809
33267325
TH
6810 ata_force_spd_limit(link);
6811
4fb37a25
TH
6812 link->sata_spd_limit = link->hw_sata_spd_limit;
6813
6814 return 0;
6815}
6816
1da177e4 6817/**
f3187195
TH
6818 * ata_port_alloc - allocate and initialize basic ATA port resources
6819 * @host: ATA host this allocated port belongs to
1da177e4 6820 *
f3187195
TH
6821 * Allocate and initialize basic ATA port resources.
6822 *
6823 * RETURNS:
6824 * Allocate ATA port on success, NULL on failure.
0cba632b 6825 *
1da177e4 6826 * LOCKING:
f3187195 6827 * Inherited from calling layer (may sleep).
1da177e4 6828 */
f3187195 6829struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6830{
f3187195 6831 struct ata_port *ap;
1da177e4 6832
f3187195
TH
6833 DPRINTK("ENTER\n");
6834
6835 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6836 if (!ap)
6837 return NULL;
6838
f4d6d004 6839 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6840 ap->lock = &host->lock;
198e0fed 6841 ap->flags = ATA_FLAG_DISABLED;
f3187195 6842 ap->print_id = -1;
1da177e4 6843 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6844 ap->host = host;
f3187195 6845 ap->dev = host->dev;
1da177e4 6846 ap->last_ctl = 0xFF;
bd5d825c
BP
6847
6848#if defined(ATA_VERBOSE_DEBUG)
6849 /* turn on all debugging levels */
6850 ap->msg_enable = 0x00FF;
6851#elif defined(ATA_DEBUG)
6852 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6853#else
0dd4b21f 6854 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6855#endif
1da177e4 6856
442eacc3 6857 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
6858 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6859 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6860 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6861 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6862 init_timer_deferrable(&ap->fastdrain_timer);
6863 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6864 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6865
838df628 6866 ap->cbl = ATA_CBL_NONE;
838df628 6867
8989805d 6868 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6869
6870#ifdef ATA_IRQ_TRAP
6871 ap->stats.unhandled_irq = 1;
6872 ap->stats.idle_irq = 1;
6873#endif
1da177e4 6874 return ap;
1da177e4
LT
6875}
6876
f0d36efd
TH
6877static void ata_host_release(struct device *gendev, void *res)
6878{
6879 struct ata_host *host = dev_get_drvdata(gendev);
6880 int i;
6881
1aa506e4
TH
6882 for (i = 0; i < host->n_ports; i++) {
6883 struct ata_port *ap = host->ports[i];
6884
4911487a
TH
6885 if (!ap)
6886 continue;
6887
6888 if (ap->scsi_host)
1aa506e4
TH
6889 scsi_host_put(ap->scsi_host);
6890
633273a3 6891 kfree(ap->pmp_link);
4911487a 6892 kfree(ap);
1aa506e4
TH
6893 host->ports[i] = NULL;
6894 }
6895
1aa56cca 6896 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6897}
6898
f3187195
TH
6899/**
6900 * ata_host_alloc - allocate and init basic ATA host resources
6901 * @dev: generic device this host is associated with
6902 * @max_ports: maximum number of ATA ports associated with this host
6903 *
6904 * Allocate and initialize basic ATA host resources. LLD calls
6905 * this function to allocate a host, initializes it fully and
6906 * attaches it using ata_host_register().
6907 *
6908 * @max_ports ports are allocated and host->n_ports is
6909 * initialized to @max_ports. The caller is allowed to decrease
6910 * host->n_ports before calling ata_host_register(). The unused
6911 * ports will be automatically freed on registration.
6912 *
6913 * RETURNS:
6914 * Allocate ATA host on success, NULL on failure.
6915 *
6916 * LOCKING:
6917 * Inherited from calling layer (may sleep).
6918 */
6919struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6920{
6921 struct ata_host *host;
6922 size_t sz;
6923 int i;
6924
6925 DPRINTK("ENTER\n");
6926
6927 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6928 return NULL;
6929
6930 /* alloc a container for our list of ATA ports (buses) */
6931 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6932 /* alloc a container for our list of ATA ports (buses) */
6933 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6934 if (!host)
6935 goto err_out;
6936
6937 devres_add(dev, host);
6938 dev_set_drvdata(dev, host);
6939
6940 spin_lock_init(&host->lock);
6941 host->dev = dev;
6942 host->n_ports = max_ports;
6943
6944 /* allocate ports bound to this host */
6945 for (i = 0; i < max_ports; i++) {
6946 struct ata_port *ap;
6947
6948 ap = ata_port_alloc(host);
6949 if (!ap)
6950 goto err_out;
6951
6952 ap->port_no = i;
6953 host->ports[i] = ap;
6954 }
6955
6956 devres_remove_group(dev, NULL);
6957 return host;
6958
6959 err_out:
6960 devres_release_group(dev, NULL);
6961 return NULL;
6962}
6963
f5cda257
TH
6964/**
6965 * ata_host_alloc_pinfo - alloc host and init with port_info array
6966 * @dev: generic device this host is associated with
6967 * @ppi: array of ATA port_info to initialize host with
6968 * @n_ports: number of ATA ports attached to this host
6969 *
6970 * Allocate ATA host and initialize with info from @ppi. If NULL
6971 * terminated, @ppi may contain fewer entries than @n_ports. The
6972 * last entry will be used for the remaining ports.
6973 *
6974 * RETURNS:
6975 * Allocate ATA host on success, NULL on failure.
6976 *
6977 * LOCKING:
6978 * Inherited from calling layer (may sleep).
6979 */
6980struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6981 const struct ata_port_info * const * ppi,
6982 int n_ports)
6983{
6984 const struct ata_port_info *pi;
6985 struct ata_host *host;
6986 int i, j;
6987
6988 host = ata_host_alloc(dev, n_ports);
6989 if (!host)
6990 return NULL;
6991
6992 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6993 struct ata_port *ap = host->ports[i];
6994
6995 if (ppi[j])
6996 pi = ppi[j++];
6997
6998 ap->pio_mask = pi->pio_mask;
6999 ap->mwdma_mask = pi->mwdma_mask;
7000 ap->udma_mask = pi->udma_mask;
7001 ap->flags |= pi->flags;
0c88758b 7002 ap->link.flags |= pi->link_flags;
f5cda257
TH
7003 ap->ops = pi->port_ops;
7004
7005 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
7006 host->ops = pi->port_ops;
f5cda257
TH
7007 }
7008
7009 return host;
7010}
7011
32ebbc0c
TH
7012static void ata_host_stop(struct device *gendev, void *res)
7013{
7014 struct ata_host *host = dev_get_drvdata(gendev);
7015 int i;
7016
7017 WARN_ON(!(host->flags & ATA_HOST_STARTED));
7018
7019 for (i = 0; i < host->n_ports; i++) {
7020 struct ata_port *ap = host->ports[i];
7021
7022 if (ap->ops->port_stop)
7023 ap->ops->port_stop(ap);
7024 }
7025
7026 if (host->ops->host_stop)
7027 host->ops->host_stop(host);
7028}
7029
029cfd6b
TH
7030/**
7031 * ata_finalize_port_ops - finalize ata_port_operations
7032 * @ops: ata_port_operations to finalize
7033 *
7034 * An ata_port_operations can inherit from another ops and that
7035 * ops can again inherit from another. This can go on as many
7036 * times as necessary as long as there is no loop in the
7037 * inheritance chain.
7038 *
7039 * Ops tables are finalized when the host is started. NULL or
7040 * unspecified entries are inherited from the closet ancestor
7041 * which has the method and the entry is populated with it.
7042 * After finalization, the ops table directly points to all the
7043 * methods and ->inherits is no longer necessary and cleared.
7044 *
7045 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
7046 *
7047 * LOCKING:
7048 * None.
7049 */
7050static void ata_finalize_port_ops(struct ata_port_operations *ops)
7051{
7052 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
7053 const struct ata_port_operations *cur;
7054 void **begin = (void **)ops;
7055 void **end = (void **)&ops->inherits;
7056 void **pp;
7057
7058 if (!ops || !ops->inherits)
7059 return;
7060
7061 spin_lock(&lock);
7062
7063 for (cur = ops->inherits; cur; cur = cur->inherits) {
7064 void **inherit = (void **)cur;
7065
7066 for (pp = begin; pp < end; pp++, inherit++)
7067 if (!*pp)
7068 *pp = *inherit;
7069 }
7070
7071 for (pp = begin; pp < end; pp++)
7072 if (IS_ERR(*pp))
7073 *pp = NULL;
7074
7075 ops->inherits = NULL;
7076
7077 spin_unlock(&lock);
7078}
7079
ecef7253
TH
7080/**
7081 * ata_host_start - start and freeze ports of an ATA host
7082 * @host: ATA host to start ports for
7083 *
7084 * Start and then freeze ports of @host. Started status is
7085 * recorded in host->flags, so this function can be called
7086 * multiple times. Ports are guaranteed to get started only
f3187195
TH
7087 * once. If host->ops isn't initialized yet, its set to the
7088 * first non-dummy port ops.
ecef7253
TH
7089 *
7090 * LOCKING:
7091 * Inherited from calling layer (may sleep).
7092 *
7093 * RETURNS:
7094 * 0 if all ports are started successfully, -errno otherwise.
7095 */
7096int ata_host_start(struct ata_host *host)
7097{
32ebbc0c
TH
7098 int have_stop = 0;
7099 void *start_dr = NULL;
ecef7253
TH
7100 int i, rc;
7101
7102 if (host->flags & ATA_HOST_STARTED)
7103 return 0;
7104
029cfd6b
TH
7105 ata_finalize_port_ops(host->ops);
7106
ecef7253
TH
7107 for (i = 0; i < host->n_ports; i++) {
7108 struct ata_port *ap = host->ports[i];
7109
029cfd6b
TH
7110 ata_finalize_port_ops(ap->ops);
7111
f3187195
TH
7112 if (!host->ops && !ata_port_is_dummy(ap))
7113 host->ops = ap->ops;
7114
32ebbc0c
TH
7115 if (ap->ops->port_stop)
7116 have_stop = 1;
7117 }
7118
7119 if (host->ops->host_stop)
7120 have_stop = 1;
7121
7122 if (have_stop) {
7123 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
7124 if (!start_dr)
7125 return -ENOMEM;
7126 }
7127
7128 for (i = 0; i < host->n_ports; i++) {
7129 struct ata_port *ap = host->ports[i];
7130
ecef7253
TH
7131 if (ap->ops->port_start) {
7132 rc = ap->ops->port_start(ap);
7133 if (rc) {
0f9fe9b7 7134 if (rc != -ENODEV)
0f757743
AM
7135 dev_printk(KERN_ERR, host->dev,
7136 "failed to start port %d "
7137 "(errno=%d)\n", i, rc);
ecef7253
TH
7138 goto err_out;
7139 }
7140 }
ecef7253
TH
7141 ata_eh_freeze_port(ap);
7142 }
7143
32ebbc0c
TH
7144 if (start_dr)
7145 devres_add(host->dev, start_dr);
ecef7253
TH
7146 host->flags |= ATA_HOST_STARTED;
7147 return 0;
7148
7149 err_out:
7150 while (--i >= 0) {
7151 struct ata_port *ap = host->ports[i];
7152
7153 if (ap->ops->port_stop)
7154 ap->ops->port_stop(ap);
7155 }
32ebbc0c 7156 devres_free(start_dr);
ecef7253
TH
7157 return rc;
7158}
7159
b03732f0 7160/**
cca3974e
JG
7161 * ata_sas_host_init - Initialize a host struct
7162 * @host: host to initialize
7163 * @dev: device host is attached to
7164 * @flags: host flags
7165 * @ops: port_ops
b03732f0
BK
7166 *
7167 * LOCKING:
7168 * PCI/etc. bus probe sem.
7169 *
7170 */
f3187195 7171/* KILLME - the only user left is ipr */
cca3974e 7172void ata_host_init(struct ata_host *host, struct device *dev,
029cfd6b 7173 unsigned long flags, struct ata_port_operations *ops)
b03732f0 7174{
cca3974e
JG
7175 spin_lock_init(&host->lock);
7176 host->dev = dev;
7177 host->flags = flags;
7178 host->ops = ops;
b03732f0
BK
7179}
7180
f3187195
TH
7181/**
7182 * ata_host_register - register initialized ATA host
7183 * @host: ATA host to register
7184 * @sht: template for SCSI host
7185 *
7186 * Register initialized ATA host. @host is allocated using
7187 * ata_host_alloc() and fully initialized by LLD. This function
7188 * starts ports, registers @host with ATA and SCSI layers and
7189 * probe registered devices.
7190 *
7191 * LOCKING:
7192 * Inherited from calling layer (may sleep).
7193 *
7194 * RETURNS:
7195 * 0 on success, -errno otherwise.
7196 */
7197int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7198{
7199 int i, rc;
7200
7201 /* host must have been started */
7202 if (!(host->flags & ATA_HOST_STARTED)) {
7203 dev_printk(KERN_ERR, host->dev,
7204 "BUG: trying to register unstarted host\n");
7205 WARN_ON(1);
7206 return -EINVAL;
7207 }
7208
7209 /* Blow away unused ports. This happens when LLD can't
7210 * determine the exact number of ports to allocate at
7211 * allocation time.
7212 */
7213 for (i = host->n_ports; host->ports[i]; i++)
7214 kfree(host->ports[i]);
7215
7216 /* give ports names and add SCSI hosts */
7217 for (i = 0; i < host->n_ports; i++)
7218 host->ports[i]->print_id = ata_print_id++;
7219
7220 rc = ata_scsi_add_hosts(host, sht);
7221 if (rc)
7222 return rc;
7223
fafbae87
TH
7224 /* associate with ACPI nodes */
7225 ata_acpi_associate(host);
7226
f3187195
TH
7227 /* set cable, sata_spd_limit and report */
7228 for (i = 0; i < host->n_ports; i++) {
7229 struct ata_port *ap = host->ports[i];
f3187195
TH
7230 unsigned long xfer_mask;
7231
7232 /* set SATA cable type if still unset */
7233 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7234 ap->cbl = ATA_CBL_SATA;
7235
7236 /* init sata_spd_limit to the current value */
4fb37a25 7237 sata_link_init_spd(&ap->link);
f3187195 7238
cbcdd875 7239 /* print per-port info to dmesg */
f3187195
TH
7240 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7241 ap->udma_mask);
7242
abf6e8ed 7243 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7244 ata_port_printk(ap, KERN_INFO,
7245 "%cATA max %s %s\n",
a16abc0b 7246 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7247 ata_mode_string(xfer_mask),
cbcdd875 7248 ap->link.eh_info.desc);
abf6e8ed
TH
7249 ata_ehi_clear_desc(&ap->link.eh_info);
7250 } else
f3187195
TH
7251 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7252 }
7253
7254 /* perform each probe synchronously */
7255 DPRINTK("probe begin\n");
7256 for (i = 0; i < host->n_ports; i++) {
7257 struct ata_port *ap = host->ports[i];
f3187195
TH
7258
7259 /* probe */
7260 if (ap->ops->error_handler) {
9af5c9c9 7261 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7262 unsigned long flags;
7263
7264 ata_port_probe(ap);
7265
7266 /* kick EH for boot probing */
7267 spin_lock_irqsave(ap->lock, flags);
7268
b558eddd 7269 ehi->probe_mask |= ATA_ALL_DEVICES;
cf480626 7270 ehi->action |= ATA_EH_RESET;
f3187195
TH
7271 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7272
f4d6d004 7273 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7274 ap->pflags |= ATA_PFLAG_LOADING;
7275 ata_port_schedule_eh(ap);
7276
7277 spin_unlock_irqrestore(ap->lock, flags);
7278
7279 /* wait for EH to finish */
7280 ata_port_wait_eh(ap);
7281 } else {
7282 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7283 rc = ata_bus_probe(ap);
7284 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7285
7286 if (rc) {
7287 /* FIXME: do something useful here?
7288 * Current libata behavior will
7289 * tear down everything when
7290 * the module is removed
7291 * or the h/w is unplugged.
7292 */
7293 }
7294 }
7295 }
7296
7297 /* probes are done, now scan each port's disk(s) */
7298 DPRINTK("host probe begin\n");
7299 for (i = 0; i < host->n_ports; i++) {
7300 struct ata_port *ap = host->ports[i];
7301
1ae46317 7302 ata_scsi_scan_host(ap, 1);
ca77329f 7303 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7304 }
7305
7306 return 0;
7307}
7308
f5cda257
TH
7309/**
7310 * ata_host_activate - start host, request IRQ and register it
7311 * @host: target ATA host
7312 * @irq: IRQ to request
7313 * @irq_handler: irq_handler used when requesting IRQ
7314 * @irq_flags: irq_flags used when requesting IRQ
7315 * @sht: scsi_host_template to use when registering the host
7316 *
7317 * After allocating an ATA host and initializing it, most libata
7318 * LLDs perform three steps to activate the host - start host,
7319 * request IRQ and register it. This helper takes necessasry
7320 * arguments and performs the three steps in one go.
7321 *
3d46b2e2
PM
7322 * An invalid IRQ skips the IRQ registration and expects the host to
7323 * have set polling mode on the port. In this case, @irq_handler
7324 * should be NULL.
7325 *
f5cda257
TH
7326 * LOCKING:
7327 * Inherited from calling layer (may sleep).
7328 *
7329 * RETURNS:
7330 * 0 on success, -errno otherwise.
7331 */
7332int ata_host_activate(struct ata_host *host, int irq,
7333 irq_handler_t irq_handler, unsigned long irq_flags,
7334 struct scsi_host_template *sht)
7335{
cbcdd875 7336 int i, rc;
f5cda257
TH
7337
7338 rc = ata_host_start(host);
7339 if (rc)
7340 return rc;
7341
3d46b2e2
PM
7342 /* Special case for polling mode */
7343 if (!irq) {
7344 WARN_ON(irq_handler);
7345 return ata_host_register(host, sht);
7346 }
7347
f5cda257
TH
7348 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7349 dev_driver_string(host->dev), host);
7350 if (rc)
7351 return rc;
7352
cbcdd875
TH
7353 for (i = 0; i < host->n_ports; i++)
7354 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7355
f5cda257
TH
7356 rc = ata_host_register(host, sht);
7357 /* if failed, just free the IRQ and leave ports alone */
7358 if (rc)
7359 devm_free_irq(host->dev, irq, host);
7360
7361 return rc;
7362}
7363
720ba126
TH
7364/**
7365 * ata_port_detach - Detach ATA port in prepration of device removal
7366 * @ap: ATA port to be detached
7367 *
7368 * Detach all ATA devices and the associated SCSI devices of @ap;
7369 * then, remove the associated SCSI host. @ap is guaranteed to
7370 * be quiescent on return from this function.
7371 *
7372 * LOCKING:
7373 * Kernel thread context (may sleep).
7374 */
741b7763 7375static void ata_port_detach(struct ata_port *ap)
720ba126
TH
7376{
7377 unsigned long flags;
41bda9c9 7378 struct ata_link *link;
f58229f8 7379 struct ata_device *dev;
720ba126
TH
7380
7381 if (!ap->ops->error_handler)
c3cf30a9 7382 goto skip_eh;
720ba126
TH
7383
7384 /* tell EH we're leaving & flush EH */
ba6a1308 7385 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 7386 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 7387 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7388
7389 ata_port_wait_eh(ap);
7390
7f9ad9b8
TH
7391 /* EH is now guaranteed to see UNLOADING - EH context belongs
7392 * to us. Disable all existing devices.
720ba126 7393 */
41bda9c9
TH
7394 ata_port_for_each_link(link, ap) {
7395 ata_link_for_each_dev(dev, link)
7396 ata_dev_disable(dev);
7397 }
720ba126 7398
720ba126
TH
7399 /* Final freeze & EH. All in-flight commands are aborted. EH
7400 * will be skipped and retrials will be terminated with bad
7401 * target.
7402 */
ba6a1308 7403 spin_lock_irqsave(ap->lock, flags);
720ba126 7404 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 7405 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7406
7407 ata_port_wait_eh(ap);
45a66c1c 7408 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 7409
c3cf30a9 7410 skip_eh:
720ba126 7411 /* remove the associated SCSI host */
cca3974e 7412 scsi_remove_host(ap->scsi_host);
720ba126
TH
7413}
7414
0529c159
TH
7415/**
7416 * ata_host_detach - Detach all ports of an ATA host
7417 * @host: Host to detach
7418 *
7419 * Detach all ports of @host.
7420 *
7421 * LOCKING:
7422 * Kernel thread context (may sleep).
7423 */
7424void ata_host_detach(struct ata_host *host)
7425{
7426 int i;
7427
7428 for (i = 0; i < host->n_ports; i++)
7429 ata_port_detach(host->ports[i]);
562f0c2d
TH
7430
7431 /* the host is dead now, dissociate ACPI */
7432 ata_acpi_dissociate(host);
0529c159
TH
7433}
7434
1da177e4
LT
7435/**
7436 * ata_std_ports - initialize ioaddr with standard port offsets.
7437 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7438 *
7439 * Utility function which initializes data_addr, error_addr,
7440 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7441 * device_addr, status_addr, and command_addr to standard offsets
7442 * relative to cmd_addr.
7443 *
7444 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7445 */
0baab86b 7446
1da177e4
LT
7447void ata_std_ports(struct ata_ioports *ioaddr)
7448{
7449 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7450 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7451 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7452 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7453 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7454 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7455 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7456 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7457 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7458 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7459}
7460
0baab86b 7461
374b1873
JG
7462#ifdef CONFIG_PCI
7463
1da177e4
LT
7464/**
7465 * ata_pci_remove_one - PCI layer callback for device removal
7466 * @pdev: PCI device that was removed
7467 *
b878ca5d
TH
7468 * PCI layer indicates to libata via this hook that hot-unplug or
7469 * module unload event has occurred. Detach all ports. Resource
7470 * release is handled via devres.
1da177e4
LT
7471 *
7472 * LOCKING:
7473 * Inherited from PCI layer (may sleep).
7474 */
f0d36efd 7475void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7476{
2855568b 7477 struct device *dev = &pdev->dev;
cca3974e 7478 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7479
b878ca5d 7480 ata_host_detach(host);
1da177e4
LT
7481}
7482
7483/* move to PCI subsystem */
057ace5e 7484int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7485{
7486 unsigned long tmp = 0;
7487
7488 switch (bits->width) {
7489 case 1: {
7490 u8 tmp8 = 0;
7491 pci_read_config_byte(pdev, bits->reg, &tmp8);
7492 tmp = tmp8;
7493 break;
7494 }
7495 case 2: {
7496 u16 tmp16 = 0;
7497 pci_read_config_word(pdev, bits->reg, &tmp16);
7498 tmp = tmp16;
7499 break;
7500 }
7501 case 4: {
7502 u32 tmp32 = 0;
7503 pci_read_config_dword(pdev, bits->reg, &tmp32);
7504 tmp = tmp32;
7505 break;
7506 }
7507
7508 default:
7509 return -EINVAL;
7510 }
7511
7512 tmp &= bits->mask;
7513
7514 return (tmp == bits->val) ? 1 : 0;
7515}
9b847548 7516
6ffa01d8 7517#ifdef CONFIG_PM
3c5100c1 7518void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7519{
7520 pci_save_state(pdev);
4c90d971 7521 pci_disable_device(pdev);
500530f6 7522
3a2d5b70 7523 if (mesg.event & PM_EVENT_SLEEP)
500530f6 7524 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7525}
7526
553c4aa6 7527int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7528{
553c4aa6
TH
7529 int rc;
7530
9b847548
JA
7531 pci_set_power_state(pdev, PCI_D0);
7532 pci_restore_state(pdev);
553c4aa6 7533
b878ca5d 7534 rc = pcim_enable_device(pdev);
553c4aa6
TH
7535 if (rc) {
7536 dev_printk(KERN_ERR, &pdev->dev,
7537 "failed to enable device after resume (%d)\n", rc);
7538 return rc;
7539 }
7540
9b847548 7541 pci_set_master(pdev);
553c4aa6 7542 return 0;
500530f6
TH
7543}
7544
3c5100c1 7545int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7546{
cca3974e 7547 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7548 int rc = 0;
7549
cca3974e 7550 rc = ata_host_suspend(host, mesg);
500530f6
TH
7551 if (rc)
7552 return rc;
7553
3c5100c1 7554 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7555
7556 return 0;
7557}
7558
7559int ata_pci_device_resume(struct pci_dev *pdev)
7560{
cca3974e 7561 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7562 int rc;
500530f6 7563
553c4aa6
TH
7564 rc = ata_pci_device_do_resume(pdev);
7565 if (rc == 0)
7566 ata_host_resume(host);
7567 return rc;
9b847548 7568}
6ffa01d8
TH
7569#endif /* CONFIG_PM */
7570
1da177e4
LT
7571#endif /* CONFIG_PCI */
7572
33267325
TH
7573static int __init ata_parse_force_one(char **cur,
7574 struct ata_force_ent *force_ent,
7575 const char **reason)
7576{
7577 /* FIXME: Currently, there's no way to tag init const data and
7578 * using __initdata causes build failure on some versions of
7579 * gcc. Once __initdataconst is implemented, add const to the
7580 * following structure.
7581 */
7582 static struct ata_force_param force_tbl[] __initdata = {
7583 { "40c", .cbl = ATA_CBL_PATA40 },
7584 { "80c", .cbl = ATA_CBL_PATA80 },
7585 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7586 { "unk", .cbl = ATA_CBL_PATA_UNK },
7587 { "ign", .cbl = ATA_CBL_PATA_IGN },
7588 { "sata", .cbl = ATA_CBL_SATA },
7589 { "1.5Gbps", .spd_limit = 1 },
7590 { "3.0Gbps", .spd_limit = 2 },
7591 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7592 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7593 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7594 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7595 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7596 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7597 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7598 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7599 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7600 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7601 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7602 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7603 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7604 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7605 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7606 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7607 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7608 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7609 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7610 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7611 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7612 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7613 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7614 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7615 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7616 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7617 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7618 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7619 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7620 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7621 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7622 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7623 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7624 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7625 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7626 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7627 };
7628 char *start = *cur, *p = *cur;
7629 char *id, *val, *endp;
7630 const struct ata_force_param *match_fp = NULL;
7631 int nr_matches = 0, i;
7632
7633 /* find where this param ends and update *cur */
7634 while (*p != '\0' && *p != ',')
7635 p++;
7636
7637 if (*p == '\0')
7638 *cur = p;
7639 else
7640 *cur = p + 1;
7641
7642 *p = '\0';
7643
7644 /* parse */
7645 p = strchr(start, ':');
7646 if (!p) {
7647 val = strstrip(start);
7648 goto parse_val;
7649 }
7650 *p = '\0';
7651
7652 id = strstrip(start);
7653 val = strstrip(p + 1);
7654
7655 /* parse id */
7656 p = strchr(id, '.');
7657 if (p) {
7658 *p++ = '\0';
7659 force_ent->device = simple_strtoul(p, &endp, 10);
7660 if (p == endp || *endp != '\0') {
7661 *reason = "invalid device";
7662 return -EINVAL;
7663 }
7664 }
7665
7666 force_ent->port = simple_strtoul(id, &endp, 10);
7667 if (p == endp || *endp != '\0') {
7668 *reason = "invalid port/link";
7669 return -EINVAL;
7670 }
7671
7672 parse_val:
7673 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7674 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7675 const struct ata_force_param *fp = &force_tbl[i];
7676
7677 if (strncasecmp(val, fp->name, strlen(val)))
7678 continue;
7679
7680 nr_matches++;
7681 match_fp = fp;
7682
7683 if (strcasecmp(val, fp->name) == 0) {
7684 nr_matches = 1;
7685 break;
7686 }
7687 }
7688
7689 if (!nr_matches) {
7690 *reason = "unknown value";
7691 return -EINVAL;
7692 }
7693 if (nr_matches > 1) {
7694 *reason = "ambigious value";
7695 return -EINVAL;
7696 }
7697
7698 force_ent->param = *match_fp;
7699
7700 return 0;
7701}
7702
7703static void __init ata_parse_force_param(void)
7704{
7705 int idx = 0, size = 1;
7706 int last_port = -1, last_device = -1;
7707 char *p, *cur, *next;
7708
7709 /* calculate maximum number of params and allocate force_tbl */
7710 for (p = ata_force_param_buf; *p; p++)
7711 if (*p == ',')
7712 size++;
7713
7714 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7715 if (!ata_force_tbl) {
7716 printk(KERN_WARNING "ata: failed to extend force table, "
7717 "libata.force ignored\n");
7718 return;
7719 }
7720
7721 /* parse and populate the table */
7722 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7723 const char *reason = "";
7724 struct ata_force_ent te = { .port = -1, .device = -1 };
7725
7726 next = cur;
7727 if (ata_parse_force_one(&next, &te, &reason)) {
7728 printk(KERN_WARNING "ata: failed to parse force "
7729 "parameter \"%s\" (%s)\n",
7730 cur, reason);
7731 continue;
7732 }
7733
7734 if (te.port == -1) {
7735 te.port = last_port;
7736 te.device = last_device;
7737 }
7738
7739 ata_force_tbl[idx++] = te;
7740
7741 last_port = te.port;
7742 last_device = te.device;
7743 }
7744
7745 ata_force_tbl_size = idx;
7746}
1da177e4 7747
1da177e4
LT
7748static int __init ata_init(void)
7749{
a8601e5f 7750 ata_probe_timeout *= HZ;
33267325
TH
7751
7752 ata_parse_force_param();
7753
1da177e4
LT
7754 ata_wq = create_workqueue("ata");
7755 if (!ata_wq)
7756 return -ENOMEM;
7757
453b07ac
TH
7758 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7759 if (!ata_aux_wq) {
7760 destroy_workqueue(ata_wq);
7761 return -ENOMEM;
7762 }
7763
1da177e4
LT
7764 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7765 return 0;
7766}
7767
7768static void __exit ata_exit(void)
7769{
33267325 7770 kfree(ata_force_tbl);
1da177e4 7771 destroy_workqueue(ata_wq);
453b07ac 7772 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7773}
7774
a4625085 7775subsys_initcall(ata_init);
1da177e4
LT
7776module_exit(ata_exit);
7777
67846b30 7778static unsigned long ratelimit_time;
34af946a 7779static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7780
7781int ata_ratelimit(void)
7782{
7783 int rc;
7784 unsigned long flags;
7785
7786 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7787
7788 if (time_after(jiffies, ratelimit_time)) {
7789 rc = 1;
7790 ratelimit_time = jiffies + (HZ/5);
7791 } else
7792 rc = 0;
7793
7794 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7795
7796 return rc;
7797}
7798
c22daff4
TH
7799/**
7800 * ata_wait_register - wait until register value changes
7801 * @reg: IO-mapped register
7802 * @mask: Mask to apply to read register value
7803 * @val: Wait condition
7804 * @interval_msec: polling interval in milliseconds
7805 * @timeout_msec: timeout in milliseconds
7806 *
7807 * Waiting for some bits of register to change is a common
7808 * operation for ATA controllers. This function reads 32bit LE
7809 * IO-mapped register @reg and tests for the following condition.
7810 *
7811 * (*@reg & mask) != val
7812 *
7813 * If the condition is met, it returns; otherwise, the process is
7814 * repeated after @interval_msec until timeout.
7815 *
7816 * LOCKING:
7817 * Kernel thread context (may sleep)
7818 *
7819 * RETURNS:
7820 * The final register value.
7821 */
7822u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7823 unsigned long interval_msec,
7824 unsigned long timeout_msec)
7825{
7826 unsigned long timeout;
7827 u32 tmp;
7828
7829 tmp = ioread32(reg);
7830
7831 /* Calculate timeout _after_ the first read to make sure
7832 * preceding writes reach the controller before starting to
7833 * eat away the timeout.
7834 */
7835 timeout = jiffies + (timeout_msec * HZ) / 1000;
7836
7837 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7838 msleep(interval_msec);
7839 tmp = ioread32(reg);
7840 }
7841
7842 return tmp;
7843}
7844
dd5b06c4
TH
7845/*
7846 * Dummy port_ops
7847 */
7848static void ata_dummy_noret(struct ata_port *ap) { }
7849static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7850static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7851
7852static u8 ata_dummy_check_status(struct ata_port *ap)
7853{
7854 return ATA_DRDY;
7855}
7856
7857static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7858{
7859 return AC_ERR_SYSTEM;
7860}
7861
029cfd6b 7862struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7863 .check_status = ata_dummy_check_status,
7864 .check_altstatus = ata_dummy_check_status,
7865 .dev_select = ata_noop_dev_select,
7866 .qc_prep = ata_noop_qc_prep,
7867 .qc_issue = ata_dummy_qc_issue,
7868 .freeze = ata_dummy_noret,
7869 .thaw = ata_dummy_noret,
7870 .error_handler = ata_dummy_noret,
7871 .post_internal_cmd = ata_dummy_qc_noret,
7872 .irq_clear = ata_dummy_noret,
7873 .port_start = ata_dummy_ret0,
7874 .port_stop = ata_dummy_noret,
7875};
7876
21b0ad4f
TH
7877const struct ata_port_info ata_dummy_port_info = {
7878 .port_ops = &ata_dummy_port_ops,
7879};
7880
1da177e4
LT
7881/*
7882 * libata is essentially a library of internal helper functions for
7883 * low-level ATA host controller drivers. As such, the API/ABI is
7884 * likely to change as new drivers are added and updated.
7885 * Do not depend on ABI/API stability.
7886 */
e9c83914
TH
7887EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7888EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7889EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
7890EXPORT_SYMBOL_GPL(ata_base_port_ops);
7891EXPORT_SYMBOL_GPL(sata_port_ops);
7892EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
7893EXPORT_SYMBOL_GPL(ata_sff_port_ops);
7894EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
dd5b06c4 7895EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7896EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7897EXPORT_SYMBOL_GPL(ata_std_bios_param);
7898EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7899EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7900EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7901EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7902EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7903EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7904EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7905EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7906EXPORT_SYMBOL_GPL(ata_sg_init);
9a1004d0 7907EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7908EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7909EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7910EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7911EXPORT_SYMBOL_GPL(ata_tf_load);
7912EXPORT_SYMBOL_GPL(ata_tf_read);
7913EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7914EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7915EXPORT_SYMBOL_GPL(sata_print_link_status);
436d34b3 7916EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
7917EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7918EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7919EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7920EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7921EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7922EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7923EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7924EXPORT_SYMBOL_GPL(ata_mode_string);
7925EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7926EXPORT_SYMBOL_GPL(ata_check_status);
7927EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7928EXPORT_SYMBOL_GPL(ata_exec_command);
7929EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7930EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7931EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7932EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7933EXPORT_SYMBOL_GPL(ata_data_xfer);
7934EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7935EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7936EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7937EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7938EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7939EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7940EXPORT_SYMBOL_GPL(ata_bmdma_start);
7941EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
358f9a77 7942EXPORT_SYMBOL_GPL(ata_noop_irq_clear);
1da177e4
LT
7943EXPORT_SYMBOL_GPL(ata_bmdma_status);
7944EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7945EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7946EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6d97dbd7
TH
7947EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7948EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7949EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7950EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7951EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7952EXPORT_SYMBOL_GPL(sata_link_debounce);
7953EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7954EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7955EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7956EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7957EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7958EXPORT_SYMBOL_GPL(sata_std_hardreset);
7959EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7960EXPORT_SYMBOL_GPL(ata_dev_classify);
7961EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7962EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7963EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7964EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7965EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7966EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7967EXPORT_SYMBOL_GPL(ata_wait_ready);
1da177e4
LT
7968EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7969EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7970EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7971EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7972EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7973EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7974EXPORT_SYMBOL_GPL(sata_scr_valid);
7975EXPORT_SYMBOL_GPL(sata_scr_read);
7976EXPORT_SYMBOL_GPL(sata_scr_write);
7977EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7978EXPORT_SYMBOL_GPL(ata_link_online);
7979EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7980#ifdef CONFIG_PM
cca3974e
JG
7981EXPORT_SYMBOL_GPL(ata_host_suspend);
7982EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7983#endif /* CONFIG_PM */
6a62a04d
TH
7984EXPORT_SYMBOL_GPL(ata_id_string);
7985EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7986EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7987
1bc4ccff 7988EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7989EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7990EXPORT_SYMBOL_GPL(ata_timing_compute);
7991EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7992EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7993
1da177e4
LT
7994#ifdef CONFIG_PCI
7995EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7996EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7997EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7998EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
4e6b79fa 7999EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
1da177e4
LT
8000EXPORT_SYMBOL_GPL(ata_pci_init_one);
8001EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 8002#ifdef CONFIG_PM
500530f6
TH
8003EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
8004EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
8005EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
8006EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 8007#endif /* CONFIG_PM */
67951ade
AC
8008EXPORT_SYMBOL_GPL(ata_pci_default_filter);
8009EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 8010#endif /* CONFIG_PCI */
9b847548 8011
31f88384 8012EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
8013EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
8014EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
8015EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
a1efdaba 8016EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
3af9a77a 8017
b64bbc39
TH
8018EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
8019EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
8020EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
8021EXPORT_SYMBOL_GPL(ata_port_desc);
8022#ifdef CONFIG_PCI
8023EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
8024#endif /* CONFIG_PCI */
7b70fc03 8025EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 8026EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 8027EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 8028EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 8029EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
8030EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
8031EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
8032EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
8033EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 8034EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 8035EXPORT_SYMBOL_GPL(ata_std_error_handler);
83625006 8036EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 8037EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
8038
8039EXPORT_SYMBOL_GPL(ata_cable_40wire);
8040EXPORT_SYMBOL_GPL(ata_cable_80wire);
8041EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 8042EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 8043EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 1.242847 seconds and 5 git commands to generate.