libata: report link resume failure as KERN_WARNING instead of ERR
drivers/ata/libata-core.c
/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

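/*
 * Editor's illustrative note (not part of libata-core.c): the strings
 * parsed into ata_force_tbl above follow the port[.device]:value syntax
 * documented in kernel-parameters.txt.  Using the example quoted in the
 * ata_force_cbl() comment further down, a boot-time override such as
 *
 *	libata.force=1.00:40c,udma4
 *
 * forces a 40-wire cable assumption and a UDMA/66 transfer-mode limit on
 * device 0 of port 1.
 */
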
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

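/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * walking every enabled device behind a port with the two iterators above.
 * The ata_for_each_link()/ata_for_each_dev() macros in <linux/libata.h>
 * wrap the same calls; this open-coded form only makes the control flow
 * explicit.
 */
static inline unsigned int example_count_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link = NULL;
	unsigned int count = 0;

	/* host link first, then any PMP fan-out links */
	while ((link = ata_link_next(link, ap, ATA_LITER_HOST_FIRST))) {
		struct ata_device *dev = NULL;

		/* only devices that probed successfully */
		while ((dev = ata_dev_next(dev, link, ATA_DITER_ENABLED)))
			count++;
	}

	return count;
}
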
/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to.  Note that
 * this is different from @dev->link only when @dev is on slave
 * link.  For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters.  For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it.  When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP.  If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link.  Device number 15 always
 * points to the host link whether PMP is attached or not.  If the
 * controller has slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

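/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * how a hypothetical caller could use atapi_cmd_type() to decide whether
 * an ATAPI CDB describes a data-in transfer.  GPCMD_READ_10 and friends
 * come from <linux/cdrom.h>, which this file already includes.
 */
static inline bool example_cdb_is_read(const u8 *cdb)
{
	int type = atapi_cmd_type(cdb[0]);

	/* ATAPI_READ and ATAPI_READ_CD both move data from the device */
	return type == ATAPI_READ || type == ATAPI_READ_CD;
}
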
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number*/
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

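/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * building a Register - Host to Device FIS from a taskfile and decoding
 * it again with the two helpers above.  The 20-byte buffer size and the
 * PMP port value 0 are assumptions made only for this example.
 */
static inline void example_fis_round_trip(const struct ata_taskfile *tf_in,
					  struct ata_taskfile *tf_out)
{
	u8 fis[20];

	/* encode: byte 0 becomes 0x27 and byte 1 carries the PMP port
	 * plus the Command bit, because is_cmd is non-zero */
	ata_tf_to_fis(tf_in, 0, 1, fis);

	/* decode: command/status, feature/error, LBA and count fields
	 * are copied back into *tf_out */
	ata_tf_from_fis(fis, tf_out);
}
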
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

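/*
 * Editor's worked example (not part of libata-core.c): with the classic
 * 16-head, 63-sector geometry, CHS cylinder 2, head 3, sector 4 maps to
 *
 *	block = (2 * 16 + 3) * 63 + 4 - 1 = 2208
 *
 * matching the conversion in ata_tf_read_block() above and its inverse
 * (LBA back to CHS) in ata_build_rw_tf() below.
 */
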
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

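/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * filling a taskfile for a hypothetical 8-sector FUA write at LBA
 * 0x12345678.  ata_tf_init() and the flag values are the same ones real
 * callers in libata use; error handling is reduced to a bool.
 */
static inline bool example_build_write_tf(struct ata_device *dev,
					  struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);

	/* ATA_TFLAG_WRITE selects the write opcode, ATA_TFLAG_FUA asks
	 * for forced unit access; ATA_TAG_INTERNAL steers us away from
	 * the NCQ branch above */
	return ata_build_rw_tf(tf, dev, 0x12345678ULL, 8,
			       ATA_TFLAG_WRITE | ATA_TFLAG_FUA,
			       ATA_TAG_INTERNAL) == 0;
}
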
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

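/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * round-tripping a transfer mask through the two helpers above.  The
 * particular mode masks (PIO0-4, MWDMA0-2, UDMA0-5) are arbitrary
 * example values.
 */
static inline void example_xfermask_round_trip(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	/* PIO modes 0-4, MWDMA modes 0-2, UDMA modes 0-5 */
	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	/* recovers pio == 0x1f, mwdma == 0x07, udma == 0x3f */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
}
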
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

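/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * the mask <-> mode helpers above are inverses of each other for the
 * highest set bit.  Starting from a UDMA-only mask, mask2mode picks the
 * fastest mode and mode2mask expands it back to "that mode and below".
 */
static inline void example_xfer_conversions(void)
{
	/* UDMA modes 0-5 supported */
	unsigned long udma_only = ata_pack_xfermask(0, 0, 0x3f);

	u8 best_mode = ata_xfer_mask2mode(udma_only);	/* XFER_UDMA_5 */
	unsigned long again = ata_xfer_mode2mask(best_mode);
	int shift = ata_xfer_mode2shift(best_mode);	/* ATA_SHIFT_UDMA */

	(void)again;
	(void)shift;
}
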
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @modemask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

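/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * the signature check above looks only at the LBA mid/high registers,
 * so spotting a freshly reset ATAPI device reduces to this.
 */
static inline bool example_sig_is_atapi(const struct ata_taskfile *tf)
{
	/* ATAPI signature is lbam == 0x14, lbah == 0xeb */
	return ata_dev_classify(tf) == ATA_DEV_ATAPI;
}
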
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
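
/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * pulling the model string out of IDENTIFY data with the helper above.
 * ATA_ID_PROD and ATA_ID_PROD_LEN come from <linux/ata.h>; the buffer
 * needs one extra byte for the terminating NUL, keeping @len odd as the
 * kernel-doc above requires.
 */
static inline void example_read_model(const u16 *id,
				      unsigned char *model,
				      size_t model_sz)
{
	/* trims trailing spaces and NUL-terminates the result */
	ata_id_c_string(id, model, ATA_ID_PROD,
			min_t(size_t, model_sz, ATA_ID_PROD_LEN + 1));
}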

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

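/*
 * Illustrative sketch added by the editor (not part of libata-core.c):
 * combining ata_id_xfermask() with the helpers earlier in this file to
 * log the fastest transfer mode advertised by a device's IDENTIFY data.
 */
static inline void example_report_xfermask(struct ata_device *dev)
{
	unsigned long xfer_mask = ata_id_xfermask(dev->id);

	/* e.g. "fastest mode: UDMA/133" for a mask topping out at UDMA6 */
	ata_dev_info(dev, "fastest mode: %s\n", ata_mode_string(xfer_mask));
}
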
7102d230 1511static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1512{
77853bf2 1513 struct completion *waiting = qc->private_data;
a2a7a662 1514
a2a7a662 1515 complete(waiting);
a2a7a662
TH
1516}
1517
1518/**
2432697b 1519 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1520 * @dev: Device to which the command is sent
1521 * @tf: Taskfile registers for the command and the result
d69cf37d 1522 * @cdb: CDB for packet command
a2a7a662 1523 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1524 * @sgl: sg list for the data buffer of the command
2432697b 1525 * @n_elem: Number of sg entries
2b789108 1526 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1527 *
1528 * Executes libata internal command with timeout. @tf contains
1529 * command on entry and result on return. Timeout and error
1530 * conditions are reported via return value. No recovery action
1531 * is taken after a command times out. It's caller's duty to
1532 * clean up after timeout.
1533 *
1534 * LOCKING:
1535 * None. Should be called with kernel context, might sleep.
551e8889
TH
1536 *
1537 * RETURNS:
1538 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1539 */
2432697b
TH
1540unsigned ata_exec_internal_sg(struct ata_device *dev,
1541 struct ata_taskfile *tf, const u8 *cdb,
87260216 1542 int dma_dir, struct scatterlist *sgl,
2b789108 1543 unsigned int n_elem, unsigned long timeout)
a2a7a662 1544{
9af5c9c9
TH
1545 struct ata_link *link = dev->link;
1546 struct ata_port *ap = link->ap;
a2a7a662 1547 u8 command = tf->command;
87fbc5a0 1548 int auto_timeout = 0;
a2a7a662 1549 struct ata_queued_cmd *qc;
2ab7db1f 1550 unsigned int tag, preempted_tag;
dedaf2b0 1551 u32 preempted_sactive, preempted_qc_active;
da917d69 1552 int preempted_nr_active_links;
60be6b9a 1553 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1554 unsigned long flags;
77853bf2 1555 unsigned int err_mask;
d95a717f 1556 int rc;
a2a7a662 1557
ba6a1308 1558 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1559
e3180499 1560 /* no internal command while frozen */
b51e9e5d 1561 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1562 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1563 return AC_ERR_SYSTEM;
1564 }
1565
2ab7db1f 1566 /* initialize internal qc */
a2a7a662 1567
2ab7db1f
TH
1568 /* XXX: Tag 0 is used for drivers with legacy EH as some
1569 * drivers choke if any other tag is given. This breaks
1570 * ata_tag_internal() test for those drivers. Don't use new
1571 * EH stuff without converting to it.
1572 */
1573 if (ap->ops->error_handler)
1574 tag = ATA_TAG_INTERNAL;
1575 else
1576 tag = 0;
1577
8a8bc223
TH
1578 if (test_and_set_bit(tag, &ap->qc_allocated))
1579 BUG();
f69499f4 1580 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1581
1582 qc->tag = tag;
1583 qc->scsicmd = NULL;
1584 qc->ap = ap;
1585 qc->dev = dev;
1586 ata_qc_reinit(qc);
1587
9af5c9c9
TH
1588 preempted_tag = link->active_tag;
1589 preempted_sactive = link->sactive;
dedaf2b0 1590 preempted_qc_active = ap->qc_active;
da917d69 1591 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1592 link->active_tag = ATA_TAG_POISON;
1593 link->sactive = 0;
dedaf2b0 1594 ap->qc_active = 0;
da917d69 1595 ap->nr_active_links = 0;
2ab7db1f
TH
1596
1597 /* prepare & issue qc */
a2a7a662 1598 qc->tf = *tf;
d69cf37d
TH
1599 if (cdb)
1600 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1601 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1602 qc->dma_dir = dma_dir;
1603 if (dma_dir != DMA_NONE) {
2432697b 1604 unsigned int i, buflen = 0;
87260216 1605 struct scatterlist *sg;
2432697b 1606
87260216
JA
1607 for_each_sg(sgl, sg, n_elem, i)
1608 buflen += sg->length;
2432697b 1609
87260216 1610 ata_sg_init(qc, sgl, n_elem);
49c80429 1611 qc->nbytes = buflen;
a2a7a662
TH
1612 }
1613
77853bf2 1614 qc->private_data = &wait;
a2a7a662
TH
1615 qc->complete_fn = ata_qc_complete_internal;
1616
8e0e694a 1617 ata_qc_issue(qc);
a2a7a662 1618
ba6a1308 1619 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1620
87fbc5a0
TH
1621 if (!timeout) {
1622 if (ata_probe_timeout)
1623 timeout = ata_probe_timeout * 1000;
1624 else {
1625 timeout = ata_internal_cmd_timeout(dev, command);
1626 auto_timeout = 1;
1627 }
1628 }
2b789108 1629
c0c362b6
TH
1630 if (ap->ops->error_handler)
1631 ata_eh_release(ap);
1632
2b789108 1633 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f 1634
c0c362b6
TH
1635 if (ap->ops->error_handler)
1636 ata_eh_acquire(ap);
1637
c429137a 1638 ata_sff_flush_pio_task(ap);
41ade50c 1639
d95a717f 1640 if (!rc) {
ba6a1308 1641 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1642
1643 /* We're racing with irq here. If we lose, the
1644 * following test prevents us from completing the qc
d95a717f
TH
1645 * twice. If we win, the port is frozen and will be
1646 * cleaned up by ->post_internal_cmd().
a2a7a662 1647 */
77853bf2 1648 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1649 qc->err_mask |= AC_ERR_TIMEOUT;
1650
1651 if (ap->ops->error_handler)
1652 ata_port_freeze(ap);
1653 else
1654 ata_qc_complete(qc);
f15a1daf 1655
0dd4b21f 1656 if (ata_msg_warn(ap))
a9a79dfe
JP
1657 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1658 command);
a2a7a662
TH
1659 }
1660
ba6a1308 1661 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1662 }
1663
d95a717f
TH
1664 /* do post_internal_cmd */
1665 if (ap->ops->post_internal_cmd)
1666 ap->ops->post_internal_cmd(qc);
1667
a51d644a
TH
1668 /* perform minimal error analysis */
1669 if (qc->flags & ATA_QCFLAG_FAILED) {
1670 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1671 qc->err_mask |= AC_ERR_DEV;
1672
1673 if (!qc->err_mask)
1674 qc->err_mask |= AC_ERR_OTHER;
1675
1676 if (qc->err_mask & ~AC_ERR_OTHER)
1677 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1678 }
1679
15869303 1680 /* finish up */
ba6a1308 1681 spin_lock_irqsave(ap->lock, flags);
15869303 1682
e61e0672 1683 *tf = qc->result_tf;
77853bf2
TH
1684 err_mask = qc->err_mask;
1685
1686 ata_qc_free(qc);
9af5c9c9
TH
1687 link->active_tag = preempted_tag;
1688 link->sactive = preempted_sactive;
dedaf2b0 1689 ap->qc_active = preempted_qc_active;
da917d69 1690 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1691
ba6a1308 1692 spin_unlock_irqrestore(ap->lock, flags);
15869303 1693
87fbc5a0
TH
1694 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1695 ata_internal_cmd_timed_out(dev, command);
1696
77853bf2 1697 return err_mask;
a2a7a662
TH
1698}
1699
2432697b 1700/**
33480a0e 1701 * ata_exec_internal - execute libata internal command
2432697b
TH
1702 * @dev: Device to which the command is sent
1703 * @tf: Taskfile registers for the command and the result
1704 * @cdb: CDB for packet command
1705 * @dma_dir: Data tranfer direction of the command
1706 * @buf: Data buffer of the command
1707 * @buflen: Length of data buffer
2b789108 1708 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1709 *
1710 * Wrapper around ata_exec_internal_sg() which takes simple
1711 * buffer instead of sg list.
1712 *
1713 * LOCKING:
1714 * None. Should be called with kernel context, might sleep.
1715 *
1716 * RETURNS:
1717 * Zero on success, AC_ERR_* mask on failure
1718 */
1719unsigned ata_exec_internal(struct ata_device *dev,
1720 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1721 int dma_dir, void *buf, unsigned int buflen,
1722 unsigned long timeout)
2432697b 1723{
33480a0e
TH
1724 struct scatterlist *psg = NULL, sg;
1725 unsigned int n_elem = 0;
2432697b 1726
33480a0e
TH
1727 if (dma_dir != DMA_NONE) {
1728 WARN_ON(!buf);
1729 sg_init_one(&sg, buf, buflen);
1730 psg = &sg;
1731 n_elem++;
1732 }
2432697b 1733
2b789108
TH
1734 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1735 timeout);
2432697b
TH
1736}
1737
977e6b9f
TH
1738/**
1739 * ata_do_simple_cmd - execute simple internal command
1740 * @dev: Device to which the command is sent
1741 * @cmd: Opcode to execute
1742 *
1743 * Execute a 'simple' command, that only consists of the opcode
1744 * 'cmd' itself, without filling any other registers
1745 *
1746 * LOCKING:
1747 * Kernel thread context (may sleep).
1748 *
1749 * RETURNS:
1750 * Zero on success, AC_ERR_* mask on failure
e58eb583 1751 */
77b08fb5 1752unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1753{
1754 struct ata_taskfile tf;
e58eb583
TH
1755
1756 ata_tf_init(dev, &tf);
1757
1758 tf.command = cmd;
1759 tf.flags |= ATA_TFLAG_DEVICE;
1760 tf.protocol = ATA_PROT_NODATA;
1761
2b789108 1762 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1763}
1764
1bc4ccff
AC
1765/**
1766 * ata_pio_need_iordy - check if iordy needed
1767 * @adev: ATA device
1768 *
1769 * Check if the current speed of the device requires IORDY. Used
1770 * by various controllers for chip configuration.
1771 */
1bc4ccff
AC
1772unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1773{
0d9e6659
TH
1774 /* Don't set IORDY if we're preparing for reset. IORDY may
1775 * lead to controller lock up on certain controllers if the
1776 * port is not occupied. See bko#11703 for details.
1777 */
1778 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1779 return 0;
1780 /* Controller doesn't support IORDY. Probably a pointless
1781 * check as the caller should know this.
1782 */
9af5c9c9 1783 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1784 return 0;
5c18c4d2
DD
1785 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1786 if (ata_id_is_cfa(adev->id)
1787 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1788 return 0;
432729f0
AC
1789 /* PIO3 and higher it is mandatory */
1790 if (adev->pio_mode > XFER_PIO_2)
1791 return 1;
1792 /* We turn it on when possible */
1793 if (ata_id_has_iordy(adev->id))
1bc4ccff 1794 return 1;
432729f0
AC
1795 return 0;
1796}
2e9edbf8 1797
432729f0
AC
1798/**
1799 * ata_pio_mask_no_iordy - Return the non IORDY mask
1800 * @adev: ATA device
1801 *
1802 * Compute the highest mode possible if we are not using iordy. Return
1803 * -1 if no iordy mode is available.
1804 */
432729f0
AC
1805static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1806{
1bc4ccff 1807 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1808 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1809 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1810 /* Is the speed faster than the drive allows non IORDY ? */
1811 if (pio) {
1812 /* This is cycle times not frequency - watch the logic! */
1813 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1814 return 3 << ATA_SHIFT_PIO;
1815 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1816 }
1817 }
432729f0 1818 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1819}
1820
963e4975
AC
1821/**
1822 * ata_do_dev_read_id - default ID read method
1823 * @dev: device
1824 * @tf: proposed taskfile
1825 * @id: data buffer
1826 *
1827 * Issue the identify taskfile and hand back the buffer containing
1828 * identify data. For some RAID controllers and for pre ATA devices
1829 * this function is wrapped or replaced by the driver
1830 */
1831unsigned int ata_do_dev_read_id(struct ata_device *dev,
1832 struct ata_taskfile *tf, u16 *id)
1833{
1834 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1835 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1836}
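
/*
 * Sketch (assumption): a controller driver that must post-process IDENTIFY
 * data can wrap the default method via ata_port_operations.read_id.
 * example_read_id() is hypothetical.
 */
static unsigned int example_read_id(struct ata_device *dev,
				    struct ata_taskfile *tf, u16 *id)
{
	unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);

	if (!err_mask) {
		/* massage capability words here if the bridge firmware
		 * misreports them; details are controller specific */
	}
	return err_mask;
}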
1837
1da177e4 1838/**
49016aca 1839 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1840 * @dev: target device
1841 * @p_class: pointer to class of the target device (may be changed)
bff04647 1842 * @flags: ATA_READID_* flags
fe635c7e 1843 * @id: buffer to read IDENTIFY data into
1da177e4 1844 *
49016aca
TH
1845 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1846 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1847 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1848 * for pre-ATA4 drives.
1da177e4 1849 *
50a99018 1850 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1851 * now we abort if we hit that case.
50a99018 1852 *
1da177e4 1853 * LOCKING:
49016aca
TH
1854 * Kernel thread context (may sleep)
1855 *
1856 * RETURNS:
1857 * 0 on success, -errno otherwise.
1da177e4 1858 */
a9beec95 1859int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1860 unsigned int flags, u16 *id)
1da177e4 1861{
9af5c9c9 1862 struct ata_port *ap = dev->link->ap;
49016aca 1863 unsigned int class = *p_class;
a0123703 1864 struct ata_taskfile tf;
49016aca
TH
1865 unsigned int err_mask = 0;
1866 const char *reason;
79b42bab 1867 bool is_semb = class == ATA_DEV_SEMB;
54936f8b 1868 int may_fallback = 1, tried_spinup = 0;
49016aca 1869 int rc;
1da177e4 1870
0dd4b21f 1871 if (ata_msg_ctl(ap))
a9a79dfe 1872 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 1873
963e4975 1874retry:
3373efd8 1875 ata_tf_init(dev, &tf);
a0123703 1876
49016aca 1877 switch (class) {
79b42bab
TH
1878 case ATA_DEV_SEMB:
1879 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
49016aca 1880 case ATA_DEV_ATA:
a0123703 1881 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1882 break;
1883 case ATA_DEV_ATAPI:
a0123703 1884 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1885 break;
1886 default:
1887 rc = -ENODEV;
1888 reason = "unsupported class";
1889 goto err_out;
1da177e4
LT
1890 }
1891
a0123703 1892 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1893
1894 /* Some devices choke if TF registers contain garbage. Make
1895 * sure those are properly initialized.
1896 */
1897 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1898
1899 /* Device presence detection is unreliable on some
1900 * controllers. Always poll IDENTIFY if available.
1901 */
1902 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1903
963e4975
AC
1904 if (ap->ops->read_id)
1905 err_mask = ap->ops->read_id(dev, &tf, id);
1906 else
1907 err_mask = ata_do_dev_read_id(dev, &tf, id);
1908
a0123703 1909 if (err_mask) {
800b3996 1910 if (err_mask & AC_ERR_NODEV_HINT) {
a9a79dfe 1911 ata_dev_dbg(dev, "NODEV after polling detection\n");
55a8e2c8
TH
1912 return -ENOENT;
1913 }
1914
79b42bab 1915 if (is_semb) {
a9a79dfe
JP
1916 ata_dev_info(dev,
1917 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
79b42bab
TH
1918 /* SEMB is not supported yet */
1919 *p_class = ATA_DEV_SEMB_UNSUP;
1920 return 0;
1921 }
1922
1ffc151f
TH
1923 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1924 /* Device or controller might have reported
1925 * the wrong device class. Give a shot at the
1926 * other IDENTIFY if the current one is
1927 * aborted by the device.
1928 */
1929 if (may_fallback) {
1930 may_fallback = 0;
1931
1932 if (class == ATA_DEV_ATA)
1933 class = ATA_DEV_ATAPI;
1934 else
1935 class = ATA_DEV_ATA;
1936 goto retry;
1937 }
1938
1939 /* Control reaches here iff the device aborted
1940 * both flavors of IDENTIFYs which happens
1941 * sometimes with phantom devices.
1942 */
a9a79dfe
JP
1943 ata_dev_dbg(dev,
1944 "both IDENTIFYs aborted, assuming NODEV\n");
1ffc151f 1945 return -ENOENT;
54936f8b
TH
1946 }
1947
49016aca
TH
1948 rc = -EIO;
1949 reason = "I/O error";
1da177e4
LT
1950 goto err_out;
1951 }
1952
43c9c591 1953 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
a9a79dfe
JP
1954 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1955 "class=%d may_fallback=%d tried_spinup=%d\n",
1956 class, may_fallback, tried_spinup);
43c9c591
TH
1957 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1958 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1959 }
1960
54936f8b
TH
1961 /* Falling back doesn't make sense if ID data was read
1962 * successfully at least once.
1963 */
1964 may_fallback = 0;
1965
49016aca 1966 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1967
49016aca 1968 /* sanity check */
a4f5749b 1969 rc = -EINVAL;
6070068b 1970 reason = "device reports invalid type";
a4f5749b
TH
1971
1972 if (class == ATA_DEV_ATA) {
1973 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1974 goto err_out;
1975 } else {
1976 if (ata_id_is_ata(id))
1977 goto err_out;
49016aca
TH
1978 }
1979
169439c2
ML
1980 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1981 tried_spinup = 1;
1982 /*
1983 * Drive powered-up in standby mode, and requires a specific
1984 * SET_FEATURES spin-up subcommand before it will accept
1985 * anything other than the original IDENTIFY command.
1986 */
218f3d30 1987 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1988 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1989 rc = -EIO;
1990 reason = "SPINUP failed";
1991 goto err_out;
1992 }
1993 /*
1994 * If the drive initially returned incomplete IDENTIFY info,
1995 * we now must reissue the IDENTIFY command.
1996 */
1997 if (id[2] == 0x37c8)
1998 goto retry;
1999 }
2000
bff04647 2001 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2002 /*
2003 * The exact sequence expected by certain pre-ATA4 drives is:
2004 * SRST RESET
50a99018
AC
2005 * IDENTIFY (optional in early ATA)
2006 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2007 * anything else..
2008 * Some drives were very specific about that exact sequence.
50a99018
AC
2009 *
2010 * Note that ATA4 says lba is mandatory so the second check
c9404c9c 2011 * should never trigger.
49016aca
TH
2012 */
2013 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2014 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2015 if (err_mask) {
2016 rc = -EIO;
2017 reason = "INIT_DEV_PARAMS failed";
2018 goto err_out;
2019 }
2020
2021 /* current CHS translation info (id[53-58]) might be
2022 * changed. reread the identify device info.
2023 */
bff04647 2024 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2025 goto retry;
2026 }
2027 }
2028
2029 *p_class = class;
fe635c7e 2030
49016aca
TH
2031 return 0;
2032
2033 err_out:
88574551 2034 if (ata_msg_warn(ap))
a9a79dfe
JP
2035 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2036 reason, err_mask);
49016aca
TH
2037 return rc;
2038}
2039
9062712f
TH
2040static int ata_do_link_spd_horkage(struct ata_device *dev)
2041{
2042 struct ata_link *plink = ata_dev_phys_link(dev);
2043 u32 target, target_limit;
2044
2045 if (!sata_scr_valid(plink))
2046 return 0;
2047
2048 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2049 target = 1;
2050 else
2051 return 0;
2052
2053 target_limit = (1 << target) - 1;
2054
2055 /* if already on stricter limit, no need to push further */
2056 if (plink->sata_spd_limit <= target_limit)
2057 return 0;
2058
2059 plink->sata_spd_limit = target_limit;
2060
2061 /* Request another EH round by returning -EAGAIN if link is
2062 * going faster than the target speed. Forward progress is
2063 * guaranteed by setting sata_spd_limit to target_limit above.
2064 */
2065 if (plink->sata_spd > target) {
a9a79dfe
JP
2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2067 sata_spd_string(target));
9062712f
TH
2068 return -EAGAIN;
2069 }
2070 return 0;
2071}
2072
3373efd8 2073static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2074{
9af5c9c9 2075 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2076
2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2078 return 0;
2079
9af5c9c9 2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2081}
2082
388539f3 2083static int ata_dev_config_ncq(struct ata_device *dev,
a6e6ce8e
TH
2084 char *desc, size_t desc_sz)
2085{
9af5c9c9 2086 struct ata_port *ap = dev->link->ap;
a6e6ce8e 2087 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
388539f3
SL
2088 unsigned int err_mask;
2089 char *aa_desc = "";
a6e6ce8e
TH
2090
2091 if (!ata_id_has_ncq(dev->id)) {
2092 desc[0] = '\0';
388539f3 2093 return 0;
a6e6ce8e 2094 }
75683fe7 2095 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6 2096 snprintf(desc, desc_sz, "NCQ (not used)");
388539f3 2097 return 0;
6919a0a6 2098 }
a6e6ce8e 2099 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2100 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2101 dev->flags |= ATA_DFLAG_NCQ;
2102 }
2103
388539f3
SL
2104 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2105 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2106 ata_id_has_fpdma_aa(dev->id)) {
2107 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2108 SATA_FPDMA_AA);
2109 if (err_mask) {
a9a79dfe
JP
2110 ata_dev_err(dev,
2111 "failed to enable AA (error_mask=0x%x)\n",
2112 err_mask);
388539f3
SL
2113 if (err_mask != AC_ERR_DEV) {
2114 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2115 return -EIO;
2116 }
2117 } else
2118 aa_desc = ", AA";
2119 }
2120
a6e6ce8e 2121 if (hdepth >= ddepth)
388539f3 2122 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
a6e6ce8e 2123 else
388539f3
SL
2124 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2125 ddepth, aa_desc);
2126 return 0;
a6e6ce8e
TH
2127}
2128
49016aca 2129/**
ffeae418 2130 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2131 * @dev: Target device to configure
2132 *
2133 * Configure @dev according to @dev->id. Generic and low-level
2134 * driver specific fixups are also applied.
49016aca
TH
2135 *
2136 * LOCKING:
ffeae418
TH
2137 * Kernel thread context (may sleep)
2138 *
2139 * RETURNS:
2140 * 0 on success, -errno otherwise
49016aca 2141 */
efdaedc4 2142int ata_dev_configure(struct ata_device *dev)
49016aca 2143{
9af5c9c9
TH
2144 struct ata_port *ap = dev->link->ap;
2145 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2146 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2147 const u16 *id = dev->id;
7dc951ae 2148 unsigned long xfer_mask;
b352e57d 2149 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2150 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2151 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2152 int rc;
49016aca 2153
0dd4b21f 2154 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
a9a79dfe 2155 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
ffeae418 2156 return 0;
49016aca
TH
2157 }
2158
0dd4b21f 2159 if (ata_msg_probe(ap))
a9a79dfe 2160 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 2161
75683fe7
TH
2162 /* set horkage */
2163 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2164 ata_force_horkage(dev);
75683fe7 2165
50af2fa1 2166 if (dev->horkage & ATA_HORKAGE_DISABLE) {
a9a79dfe 2167 ata_dev_info(dev, "unsupported device, disabling\n");
50af2fa1
TH
2168 ata_dev_disable(dev);
2169 return 0;
2170 }
2171
2486fa56
TH
2172 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2173 dev->class == ATA_DEV_ATAPI) {
a9a79dfe
JP
2174 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2175 atapi_enabled ? "not supported with this driver"
2176 : "disabled");
2486fa56
TH
2177 ata_dev_disable(dev);
2178 return 0;
2179 }
2180
9062712f
TH
2181 rc = ata_do_link_spd_horkage(dev);
2182 if (rc)
2183 return rc;
2184
6746544c
TH
2185 /* let ACPI work its magic */
2186 rc = ata_acpi_on_devcfg(dev);
2187 if (rc)
2188 return rc;
08573a86 2189
05027adc
TH
2190 /* massage HPA, do it early as it might change IDENTIFY data */
2191 rc = ata_hpa_resize(dev);
2192 if (rc)
2193 return rc;
2194
c39f5ebe 2195 /* print device capabilities */
0dd4b21f 2196 if (ata_msg_probe(ap))
a9a79dfe
JP
2197 ata_dev_dbg(dev,
2198 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2199 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2200 __func__,
2201 id[49], id[82], id[83], id[84],
2202 id[85], id[86], id[87], id[88]);
c39f5ebe 2203
208a9933 2204 /* initialize to-be-configured parameters */
ea1dd4e1 2205 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2206 dev->max_sectors = 0;
2207 dev->cdb_len = 0;
2208 dev->n_sectors = 0;
2209 dev->cylinders = 0;
2210 dev->heads = 0;
2211 dev->sectors = 0;
e18086d6 2212 dev->multi_count = 0;
208a9933 2213
1da177e4
LT
2214 /*
2215 * common ATA, ATAPI feature tests
2216 */
2217
ff8854b2 2218 /* find max transfer mode; for printk only */
1148c3a7 2219 xfer_mask = ata_id_xfermask(id);
1da177e4 2220
0dd4b21f
BP
2221 if (ata_msg_probe(ap))
2222 ata_dump_id(id);
1da177e4 2223
ef143d57
AL
2224 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2225 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2226 sizeof(fwrevbuf));
2227
2228 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2229 sizeof(modelbuf));
2230
1da177e4
LT
2231 /* ATA-specific feature tests */
2232 if (dev->class == ATA_DEV_ATA) {
b352e57d 2233 if (ata_id_is_cfa(id)) {
62afe5d7
SS
2234 /* CPRM may make this media unusable */
2235 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
a9a79dfe
JP
2236 ata_dev_warn(dev,
2237 "supports DRM functions and may not be fully accessible\n");
b352e57d 2238 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2239 } else {
2dcb407e 2240 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2241 /* Warn the user if the device has TPM extensions */
2242 if (ata_id_has_tpm(id))
a9a79dfe
JP
2243 ata_dev_warn(dev,
2244 "supports DRM functions and may not be fully accessible\n");
ae8d4ee7 2245 }
b352e57d 2246
1148c3a7 2247 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2248
e18086d6
ML
2249 /* get current R/W Multiple count setting */
2250 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2251 unsigned int max = dev->id[47] & 0xff;
2252 unsigned int cnt = dev->id[59] & 0xff;
2253 /* only recognize/allow powers of two here */
2254 if (is_power_of_2(max) && is_power_of_2(cnt))
2255 if (cnt <= max)
2256 dev->multi_count = cnt;
2257 }
3f64f565 2258
1148c3a7 2259 if (ata_id_has_lba(id)) {
4c2d721a 2260 const char *lba_desc;
388539f3 2261 char ncq_desc[24];
8bf62ece 2262
4c2d721a
TH
2263 lba_desc = "LBA";
2264 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2265 if (ata_id_has_lba48(id)) {
8bf62ece 2266 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2267 lba_desc = "LBA48";
6fc49adb
TH
2268
2269 if (dev->n_sectors >= (1UL << 28) &&
2270 ata_id_has_flush_ext(id))
2271 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2272 }
8bf62ece 2273
a6e6ce8e 2274 /* config NCQ */
388539f3
SL
2275 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2276 if (rc)
2277 return rc;
a6e6ce8e 2278
8bf62ece 2279 /* print device info to dmesg */
3f64f565 2280 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2281 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2282 revbuf, modelbuf, fwrevbuf,
2283 ata_mode_string(xfer_mask));
2284 ata_dev_info(dev,
2285 "%llu sectors, multi %u: %s %s\n",
f15a1daf 2286 (unsigned long long)dev->n_sectors,
3f64f565
EM
2287 dev->multi_count, lba_desc, ncq_desc);
2288 }
ffeae418 2289 } else {
8bf62ece
AL
2290 /* CHS */
2291
2292 /* Default translation */
1148c3a7
TH
2293 dev->cylinders = id[1];
2294 dev->heads = id[3];
2295 dev->sectors = id[6];
8bf62ece 2296
1148c3a7 2297 if (ata_id_current_chs_valid(id)) {
8bf62ece 2298 /* Current CHS translation is valid. */
1148c3a7
TH
2299 dev->cylinders = id[54];
2300 dev->heads = id[55];
2301 dev->sectors = id[56];
8bf62ece
AL
2302 }
2303
2304 /* print device info to dmesg */
3f64f565 2305 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2306 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2307 revbuf, modelbuf, fwrevbuf,
2308 ata_mode_string(xfer_mask));
2309 ata_dev_info(dev,
2310 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2311 (unsigned long long)dev->n_sectors,
2312 dev->multi_count, dev->cylinders,
2313 dev->heads, dev->sectors);
3f64f565 2314 }
07f6f7d0
AL
2315 }
2316
6e7846e9 2317 dev->cdb_len = 16;
1da177e4
LT
2318 }
2319
2320 /* ATAPI-specific feature tests */
2c13b7ce 2321 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2322 const char *cdb_intr_string = "";
2323 const char *atapi_an_string = "";
91163006 2324 const char *dma_dir_string = "";
7d77b247 2325 u32 sntf;
08a556db 2326
1148c3a7 2327 rc = atapi_cdb_len(id);
1da177e4 2328 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2329 if (ata_msg_warn(ap))
a9a79dfe 2330 ata_dev_warn(dev, "unsupported CDB len\n");
ffeae418 2331 rc = -EINVAL;
1da177e4
LT
2332 goto err_out_nosup;
2333 }
6e7846e9 2334 dev->cdb_len = (unsigned int) rc;
1da177e4 2335
7d77b247
TH
2336 /* Enable ATAPI AN if both the host and device have
2337 * the support. If PMP is attached, SNTF is required
2338 * to enable ATAPI AN to discern between PHY status
2339 * changed notifications and ATAPI ANs.
9f45cbd3 2340 */
e7ecd435
TH
2341 if (atapi_an &&
2342 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
071f44b1 2343 (!sata_pmp_attached(ap) ||
7d77b247 2344 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2345 unsigned int err_mask;
2346
9f45cbd3 2347 /* issue SET feature command to turn this on */
218f3d30
JG
2348 err_mask = ata_dev_set_feature(dev,
2349 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2350 if (err_mask)
a9a79dfe
JP
2351 ata_dev_err(dev,
2352 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2353 err_mask);
854c73a2 2354 else {
9f45cbd3 2355 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2356 atapi_an_string = ", ATAPI AN";
2357 }
9f45cbd3
KCA
2358 }
2359
08a556db 2360 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2361 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2362 cdb_intr_string = ", CDB intr";
2363 }
312f7da2 2364
91163006
TH
2365 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2366 dev->flags |= ATA_DFLAG_DMADIR;
2367 dma_dir_string = ", DMADIR";
2368 }
2369
1da177e4 2370 /* print device info to dmesg */
5afc8142 2371 if (ata_msg_drv(ap) && print_info)
a9a79dfe
JP
2372 ata_dev_info(dev,
2373 "ATAPI: %s, %s, max %s%s%s%s\n",
2374 modelbuf, fwrevbuf,
2375 ata_mode_string(xfer_mask),
2376 cdb_intr_string, atapi_an_string,
2377 dma_dir_string);
1da177e4
LT
2378 }
2379
914ed354
TH
2380 /* determine max_sectors */
2381 dev->max_sectors = ATA_MAX_SECTORS;
2382 if (dev->flags & ATA_DFLAG_LBA48)
2383 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2384
c5038fc0
AC
2385 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2386 200 sectors */
3373efd8 2387 if (ata_dev_knobble(dev)) {
5afc8142 2388 if (ata_msg_drv(ap) && print_info)
a9a79dfe 2389 ata_dev_info(dev, "applying bridge limits\n");
5a529139 2390 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2391 dev->max_sectors = ATA_MAX_SECTORS;
2392 }
2393
f8d8e579 2394 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2395 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2396 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2397 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2398 }
f8d8e579 2399
75683fe7 2400 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2401 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2402 dev->max_sectors);
18d6e9d5 2403
4b2f3ede 2404 if (ap->ops->dev_config)
cd0d3bbc 2405 ap->ops->dev_config(dev);
4b2f3ede 2406
c5038fc0
AC
2407 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2408 /* Let the user know. We don't want to disallow opens for
2409 rescue purposes, or in case the vendor is just a blithering
2410 idiot. Do this after the dev_config call as some controllers
2411 with buggy firmware may want to avoid reporting false device
2412 bugs */
2413
2414 if (print_info) {
a9a79dfe 2415 ata_dev_warn(dev,
c5038fc0 2416"Drive reports diagnostics failure. This may indicate a drive\n");
a9a79dfe 2417 ata_dev_warn(dev,
c5038fc0
AC
2418"fault or invalid emulation. Contact drive vendor for information.\n");
2419 }
2420 }
2421
ac70a964 2422 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
a9a79dfe
JP
2423 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2424 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
ac70a964
TH
2425 }
2426
ffeae418 2427 return 0;
1da177e4
LT
2428
2429err_out_nosup:
0dd4b21f 2430 if (ata_msg_probe(ap))
a9a79dfe 2431 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
ffeae418 2432 return rc;
1da177e4
LT
2433}
2434
be0d18df 2435/**
2e41e8e6 2436 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2437 * @ap: port
2438 *
2e41e8e6 2439 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2440 * detection.
2441 */
2442
2443int ata_cable_40wire(struct ata_port *ap)
2444{
2445 return ATA_CBL_PATA40;
2446}
2447
2448/**
2e41e8e6 2449 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2450 * @ap: port
2451 *
2e41e8e6 2452 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2453 * detection.
2454 */
2455
2456int ata_cable_80wire(struct ata_port *ap)
2457{
2458 return ATA_CBL_PATA80;
2459}
2460
2461/**
2462 * ata_cable_unknown - return unknown PATA cable.
2463 * @ap: port
2464 *
2465 * Helper method for drivers which have no PATA cable detection.
2466 */
2467
2468int ata_cable_unknown(struct ata_port *ap)
2469{
2470 return ATA_CBL_PATA_UNK;
2471}
2472
c88f90c3
TH
2473/**
2474 * ata_cable_ignore - return ignored PATA cable.
2475 * @ap: port
2476 *
2477 * Helper method for drivers which don't use cable type to limit
2478 * transfer mode.
2479 */
2480int ata_cable_ignore(struct ata_port *ap)
2481{
2482 return ATA_CBL_PATA_IGN;
2483}
2484
be0d18df
AC
2485/**
2486 * ata_cable_sata - return SATA cable type
2487 * @ap: port
2488 *
2489 * Helper method for drivers which have SATA cables
2490 */
2491
2492int ata_cable_sata(struct ata_port *ap)
2493{
2494 return ATA_CBL_SATA;
2495}
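
/*
 * Sketch (assumption): a PATA driver with no cable detect pin simply wires
 * one of the helpers above into its port operations.  example_pata_ops and
 * the ata_sff_port_ops base are illustrative of an SFF-style driver.
 */
static struct ata_port_operations example_pata_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,	/* board is hardwired 40-wire */
	/* .set_piomode / .set_dmamode would follow here */
};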
2496
1da177e4
LT
2497/**
2498 * ata_bus_probe - Reset and probe ATA bus
2499 * @ap: Bus to probe
2500 *
0cba632b
JG
2501 * Master ATA bus probing function. Initiates a hardware-dependent
2502 * bus reset, then attempts to identify any devices found on
2503 * the bus.
2504 *
1da177e4 2505 * LOCKING:
0cba632b 2506 * PCI/etc. bus probe sem.
1da177e4
LT
2507 *
2508 * RETURNS:
96072e69 2509 * Zero on success, negative errno otherwise.
1da177e4
LT
2510 */
2511
80289167 2512int ata_bus_probe(struct ata_port *ap)
1da177e4 2513{
28ca5c57 2514 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2515 int tries[ATA_MAX_DEVICES];
f58229f8 2516 int rc;
e82cbdb9 2517 struct ata_device *dev;
1da177e4 2518
1eca4365 2519 ata_for_each_dev(dev, &ap->link, ALL)
f58229f8 2520 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2521
2522 retry:
1eca4365 2523 ata_for_each_dev(dev, &ap->link, ALL) {
cdeab114
TH
2524 /* If we issue an SRST then an ATA drive (not ATAPI)
2525 * may change configuration and be in PIO0 timing. If
2526 * we do a hard reset (or are coming from power on)
2527 * this is true for ATA or ATAPI. Until we've set a
2528 * suitable controller mode we should not touch the
2529 * bus as we may be talking too fast.
2530 */
2531 dev->pio_mode = XFER_PIO_0;
2532
2533 /* If the controller has a pio mode setup function
2534 * then use it to set the chipset to rights. Don't
2535 * touch the DMA setup as that will be dealt with when
2536 * configuring devices.
2537 */
2538 if (ap->ops->set_piomode)
2539 ap->ops->set_piomode(ap, dev);
2540 }
2541
2044470c 2542 /* reset and determine device classes */
52783c5d 2543 ap->ops->phy_reset(ap);
2061a47a 2544
1eca4365 2545 ata_for_each_dev(dev, &ap->link, ALL) {
3e4ec344 2546 if (dev->class != ATA_DEV_UNKNOWN)
52783c5d
TH
2547 classes[dev->devno] = dev->class;
2548 else
2549 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2550
52783c5d 2551 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2552 }
1da177e4 2553
f31f0cc2
JG
2554 /* read IDENTIFY page and configure devices. We have to do the identify
2555 specific sequence bass-ackwards so that PDIAG- is released by
2556 the slave device */
2557
1eca4365 2558 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
f58229f8
TH
2559 if (tries[dev->devno])
2560 dev->class = classes[dev->devno];
ffeae418 2561
14d2bac1 2562 if (!ata_dev_enabled(dev))
ffeae418 2563 continue;
ffeae418 2564
bff04647
TH
2565 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2566 dev->id);
14d2bac1
TH
2567 if (rc)
2568 goto fail;
f31f0cc2
JG
2569 }
2570
be0d18df
AC
2571 /* Now ask for the cable type as PDIAG- should have been released */
2572 if (ap->ops->cable_detect)
2573 ap->cbl = ap->ops->cable_detect(ap);
2574
1eca4365
TH
2575 /* We may have SATA bridge glue hiding here irrespective of
2576 * the reported cable types and sensed types. When SATA
2577 * drives indicate we have a bridge, we don't know which end
2578 * of the link the bridge is on, which is a problem.
2579 */
2580 ata_for_each_dev(dev, &ap->link, ENABLED)
614fe29b
AC
2581 if (ata_id_is_sata(dev->id))
2582 ap->cbl = ATA_CBL_SATA;
614fe29b 2583
f31f0cc2
JG
2584 /* After the identify sequence we can now set up the devices. We do
2585 this in the normal order so that the user doesn't get confused */
2586
1eca4365 2587 ata_for_each_dev(dev, &ap->link, ENABLED) {
9af5c9c9 2588 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2589 rc = ata_dev_configure(dev);
9af5c9c9 2590 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2591 if (rc)
2592 goto fail;
1da177e4
LT
2593 }
2594
e82cbdb9 2595 /* configure transfer mode */
0260731f 2596 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2597 if (rc)
51713d35 2598 goto fail;
1da177e4 2599
1eca4365
TH
2600 ata_for_each_dev(dev, &ap->link, ENABLED)
2601 return 0;
1da177e4 2602
96072e69 2603 return -ENODEV;
14d2bac1
TH
2604
2605 fail:
4ae72a1e
TH
2606 tries[dev->devno]--;
2607
14d2bac1
TH
2608 switch (rc) {
2609 case -EINVAL:
4ae72a1e 2610 /* eeek, something went very wrong, give up */
14d2bac1
TH
2611 tries[dev->devno] = 0;
2612 break;
4ae72a1e
TH
2613
2614 case -ENODEV:
2615 /* give it just one more chance */
2616 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2617 case -EIO:
4ae72a1e
TH
2618 if (tries[dev->devno] == 1) {
2619 /* This is the last chance, better to slow
2620 * down than lose it.
2621 */
a07d499b 2622 sata_down_spd_limit(&ap->link, 0);
4ae72a1e
TH
2623 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2624 }
14d2bac1
TH
2625 }
2626
4ae72a1e 2627 if (!tries[dev->devno])
3373efd8 2628 ata_dev_disable(dev);
ec573755 2629
14d2bac1 2630 goto retry;
1da177e4
LT
2631}
2632
3be680b7
TH
2633/**
2634 * sata_print_link_status - Print SATA link status
936fd732 2635 * @link: SATA link to printk link status about
3be680b7
TH
2636 *
2637 * This function prints link speed and status of a SATA link.
2638 *
2639 * LOCKING:
2640 * None.
2641 */
6bdb4fc9 2642static void sata_print_link_status(struct ata_link *link)
3be680b7 2643{
6d5f9732 2644 u32 sstatus, scontrol, tmp;
3be680b7 2645
936fd732 2646 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2647 return;
936fd732 2648 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2649
b1c72916 2650 if (ata_phys_link_online(link)) {
3be680b7 2651 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
2652 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2653 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2654 } else {
a9a79dfe
JP
2655 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2656 sstatus, scontrol);
3be680b7
TH
2657 }
2658}
2659
ebdfca6e
AC
2660/**
2661 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2662 * @adev: device
2663 *
2664 * Obtain the other device on the same cable, or if none is
2665 * present NULL is returned
2666 */
2e9edbf8 2667
3373efd8 2668struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2669{
9af5c9c9
TH
2670 struct ata_link *link = adev->link;
2671 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2672 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2673 return NULL;
2674 return pair;
2675}
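
/*
 * Sketch (assumption): PATA drivers whose master/slave timings share one
 * register set often clamp to the slower device on the cable.
 * example_slowest_pio() is a hypothetical helper built on ata_dev_pair().
 */
static u8 example_slowest_pio(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	if (pair && pair->pio_mode < adev->pio_mode)
		return pair->pio_mode;
	return adev->pio_mode;
}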
2676
1c3fae4d 2677/**
3c567b7d 2678 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2679 * @link: Link to adjust SATA spd limit for
a07d499b 2680 * @spd_limit: Additional limit
1c3fae4d 2681 *
936fd732 2682 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2683 * function only adjusts the limit. The change must be applied
3c567b7d 2684 * using sata_set_spd().
1c3fae4d 2685 *
a07d499b
TH
2686 * If @spd_limit is non-zero, the speed is limited to equal to or
2687 * lower than @spd_limit if such speed is supported. If
2688 * @spd_limit is slower than any supported speed, only the lowest
2689 * supported speed is allowed.
2690 *
1c3fae4d
TH
2691 * LOCKING:
2692 * Inherited from caller.
2693 *
2694 * RETURNS:
2695 * 0 on success, negative errno on failure
2696 */
a07d499b 2697int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
1c3fae4d 2698{
81952c54 2699 u32 sstatus, spd, mask;
a07d499b 2700 int rc, bit;
1c3fae4d 2701
936fd732 2702 if (!sata_scr_valid(link))
008a7896
TH
2703 return -EOPNOTSUPP;
2704
2705 /* If SCR can be read, use it to determine the current SPD.
936fd732 2706 * If not, use cached value in link->sata_spd.
008a7896 2707 */
936fd732 2708 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
9913ff8a 2709 if (rc == 0 && ata_sstatus_online(sstatus))
008a7896
TH
2710 spd = (sstatus >> 4) & 0xf;
2711 else
936fd732 2712 spd = link->sata_spd;
1c3fae4d 2713
936fd732 2714 mask = link->sata_spd_limit;
1c3fae4d
TH
2715 if (mask <= 1)
2716 return -EINVAL;
008a7896
TH
2717
2718 /* unconditionally mask off the highest bit */
a07d499b
TH
2719 bit = fls(mask) - 1;
2720 mask &= ~(1 << bit);
1c3fae4d 2721
008a7896
TH
2722 /* Mask off all speeds higher than or equal to the current
2723 * one. Force 1.5Gbps if current SPD is not available.
2724 */
2725 if (spd > 1)
2726 mask &= (1 << (spd - 1)) - 1;
2727 else
2728 mask &= 1;
2729
2730 /* were we already at the bottom? */
1c3fae4d
TH
2731 if (!mask)
2732 return -EINVAL;
2733
a07d499b
TH
2734 if (spd_limit) {
2735 if (mask & ((1 << spd_limit) - 1))
2736 mask &= (1 << spd_limit) - 1;
2737 else {
2738 bit = ffs(mask) - 1;
2739 mask = 1 << bit;
2740 }
2741 }
2742
936fd732 2743 link->sata_spd_limit = mask;
1c3fae4d 2744
a9a79dfe
JP
2745 ata_link_warn(link, "limiting SATA link speed to %s\n",
2746 sata_spd_string(fls(mask)));
1c3fae4d
TH
2747
2748 return 0;
2749}
2750
936fd732 2751static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2752{
5270222f
TH
2753 struct ata_link *host_link = &link->ap->link;
2754 u32 limit, target, spd;
1c3fae4d 2755
5270222f
TH
2756 limit = link->sata_spd_limit;
2757
2758 /* Don't configure downstream link faster than upstream link.
2759 * It doesn't speed up anything and some PMPs choke on such
2760 * configuration.
2761 */
2762 if (!ata_is_host_link(link) && host_link->sata_spd)
2763 limit &= (1 << host_link->sata_spd) - 1;
2764
2765 if (limit == UINT_MAX)
2766 target = 0;
1c3fae4d 2767 else
5270222f 2768 target = fls(limit);
1c3fae4d
TH
2769
2770 spd = (*scontrol >> 4) & 0xf;
5270222f 2771 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2772
5270222f 2773 return spd != target;
1c3fae4d
TH
2774}
2775
2776/**
3c567b7d 2777 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2778 * @link: Link in question
1c3fae4d
TH
2779 *
2780 * Test whether the spd limit in SControl matches
936fd732 2781 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2782 * whether hardreset is necessary to apply SATA spd
2783 * configuration.
2784 *
2785 * LOCKING:
2786 * Inherited from caller.
2787 *
2788 * RETURNS:
2789 * 1 if SATA spd configuration is needed, 0 otherwise.
2790 */
1dc55e87 2791static int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2792{
2793 u32 scontrol;
2794
936fd732 2795 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2796 return 1;
1c3fae4d 2797
936fd732 2798 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2799}
2800
2801/**
3c567b7d 2802 * sata_set_spd - set SATA spd according to spd limit
936fd732 2803 * @link: Link to set SATA spd for
1c3fae4d 2804 *
936fd732 2805 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2806 *
2807 * LOCKING:
2808 * Inherited from caller.
2809 *
2810 * RETURNS:
2811 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2812 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2813 */
936fd732 2814int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2815{
2816 u32 scontrol;
81952c54 2817 int rc;
1c3fae4d 2818
936fd732 2819 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2820 return rc;
1c3fae4d 2821
936fd732 2822 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2823 return 0;
2824
936fd732 2825 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2826 return rc;
2827
1c3fae4d
TH
2828 return 1;
2829}
2830
452503f9
AC
2831/*
2832 * This mode timing computation functionality is ported over from
2833 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2834 */
2835/*
b352e57d 2836 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2837 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2838 * for UDMA6, which is currently supported only by Maxtor drives.
2839 *
2840 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2841 */
2842
2843static const struct ata_timing ata_timing[] = {
3ada9c12
DD
2844/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2845 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2846 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2847 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2848 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2849 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2850 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2851 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2852
2853 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2854 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2855 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2856
2857 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2858 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2859 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2860 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2861 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2862
2863/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2864 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2865 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2866 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2867 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2868 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2869 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2870 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
2871
2872 { 0xFF }
2873};
2874
2dcb407e
JG
2875#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2876#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
452503f9
AC
2877
2878static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2879{
3ada9c12
DD
2880 q->setup = EZ(t->setup * 1000, T);
2881 q->act8b = EZ(t->act8b * 1000, T);
2882 q->rec8b = EZ(t->rec8b * 1000, T);
2883 q->cyc8b = EZ(t->cyc8b * 1000, T);
2884 q->active = EZ(t->active * 1000, T);
2885 q->recover = EZ(t->recover * 1000, T);
2886 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2887 q->cycle = EZ(t->cycle * 1000, T);
2888 q->udma = EZ(t->udma * 1000, UT);
452503f9
AC
2889}
2890
2891void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2892 struct ata_timing *m, unsigned int what)
2893{
2894 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2895 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2896 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2897 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2898 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2899 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3ada9c12 2900 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
452503f9
AC
2901 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2902 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2903}
2904
6357357c 2905const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2906{
70cd071e
TH
2907 const struct ata_timing *t = ata_timing;
2908
2909 while (xfer_mode > t->mode)
2910 t++;
452503f9 2911
70cd071e
TH
2912 if (xfer_mode == t->mode)
2913 return t;
2914 return NULL;
452503f9
AC
2915}
2916
2917int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2918 struct ata_timing *t, int T, int UT)
2919{
9e8808a9 2920 const u16 *id = adev->id;
452503f9
AC
2921 const struct ata_timing *s;
2922 struct ata_timing p;
2923
2924 /*
2e9edbf8 2925 * Find the mode.
75b1f2f8 2926 */
452503f9
AC
2927
2928 if (!(s = ata_timing_find_mode(speed)))
2929 return -EINVAL;
2930
75b1f2f8
AL
2931 memcpy(t, s, sizeof(*s));
2932
452503f9
AC
2933 /*
2934 * If the drive is an EIDE drive, it can tell us it needs extended
2935 * PIO/MW_DMA cycle timing.
2936 */
2937
9e8808a9 2938 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
452503f9 2939 memset(&p, 0, sizeof(p));
9e8808a9 2940
2dcb407e 2941 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
9e8808a9
BZ
2942 if (speed <= XFER_PIO_2)
2943 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2944 else if ((speed <= XFER_PIO_4) ||
2945 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2946 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2947 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2948 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2949
452503f9
AC
2950 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2951 }
2952
2953 /*
2954 * Convert the timing to bus clock counts.
2955 */
2956
75b1f2f8 2957 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2958
2959 /*
c893a3ae
RD
2960 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2961 * S.M.A.R.T * and some other commands. We have to ensure that the
2962 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2963 */
2964
fd3367af 2965 if (speed > XFER_PIO_6) {
452503f9
AC
2966 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2967 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2968 }
2969
2970 /*
c893a3ae 2971 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2972 */
2973
2974 if (t->act8b + t->rec8b < t->cyc8b) {
2975 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2976 t->rec8b = t->cyc8b - t->act8b;
2977 }
2978
2979 if (t->active + t->recover < t->cycle) {
2980 t->active += (t->cycle - (t->active + t->recover)) / 2;
2981 t->recover = t->cycle - t->active;
2982 }
a617c09f 2983
4f701d1e
AC
2984 /* In a few cases quantisation may produce enough errors to
2985 leave t->cycle too low for the sum of active and recovery
2986 if so we must correct this */
2987 if (t->active + t->recover > t->cycle)
2988 t->cycle = t->active + t->recover;
452503f9
AC
2989
2990 return 0;
2991}
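
/*
 * Sketch (assumption): a driver's mode-setting hook usually feeds its bus
 * clock period into ata_timing_compute() and writes the quantised counts
 * to hardware.  example_program_timings() and the 33MHz clock are
 * illustrative; register programming is omitted.
 */
static void example_program_timings(struct ata_port *ap,
				    struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333333;	/* ~30ns bus clock period */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T))
		return;

	/* t.setup/t.active/t.recover are now in bus clocks */
	ata_dev_dbg(adev, "setup %d active %d recover %d\n",
		    t.setup, t.active, t.recover);
}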
2992
a0f79b92
TH
2993/**
2994 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2995 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2996 * @cycle: cycle duration in ns
2997 *
2998 * Return matching xfer mode for @cycle. The returned mode is of
2999 * the transfer type specified by @xfer_shift. If @cycle is too
3000 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3001 * than the fastest known mode, the fastest mode is returned.
3002 *
3003 * LOCKING:
3004 * None.
3005 *
3006 * RETURNS:
3007 * Matching xfer_mode, 0xff if no match found.
3008 */
3009u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3010{
3011 u8 base_mode = 0xff, last_mode = 0xff;
3012 const struct ata_xfer_ent *ent;
3013 const struct ata_timing *t;
3014
3015 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3016 if (ent->shift == xfer_shift)
3017 base_mode = ent->base;
3018
3019 for (t = ata_timing_find_mode(base_mode);
3020 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3021 unsigned short this_cycle;
3022
3023 switch (xfer_shift) {
3024 case ATA_SHIFT_PIO:
3025 case ATA_SHIFT_MWDMA:
3026 this_cycle = t->cycle;
3027 break;
3028 case ATA_SHIFT_UDMA:
3029 this_cycle = t->udma;
3030 break;
3031 default:
3032 return 0xff;
3033 }
3034
3035 if (cycle > this_cycle)
3036 break;
3037
3038 last_mode = t->mode;
3039 }
3040
3041 return last_mode;
3042}
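
/*
 * Sketch (assumption): the reverse mapping is handy when recovering the
 * configured mode from chipset registers.  example_cycle_to_udma() is
 * hypothetical; per the table above, 120ns maps to XFER_UDMA_0 and 60ns
 * to XFER_UDMA_2.
 */
static u8 example_cycle_to_udma(int cycle_ns)
{
	return ata_timing_cycle2mode(ATA_SHIFT_UDMA, cycle_ns);
}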
3043
cf176e1a
TH
3044/**
3045 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 3046 * @dev: Device to adjust xfer masks
458337db 3047 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
3048 *
3049 * Adjust xfer masks of @dev downward. Note that this function
3050 * does not apply the change. Invoking ata_set_mode() afterwards
3051 * will apply the limit.
3052 *
3053 * LOCKING:
3054 * Inherited from caller.
3055 *
3056 * RETURNS:
3057 * 0 on success, negative errno on failure
3058 */
458337db 3059int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 3060{
458337db 3061 char buf[32];
7dc951ae
TH
3062 unsigned long orig_mask, xfer_mask;
3063 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 3064 int quiet, highbit;
cf176e1a 3065
458337db
TH
3066 quiet = !!(sel & ATA_DNXFER_QUIET);
3067 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 3068
458337db
TH
3069 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3070 dev->mwdma_mask,
3071 dev->udma_mask);
3072 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 3073
458337db
TH
3074 switch (sel) {
3075 case ATA_DNXFER_PIO:
3076 highbit = fls(pio_mask) - 1;
3077 pio_mask &= ~(1 << highbit);
3078 break;
3079
3080 case ATA_DNXFER_DMA:
3081 if (udma_mask) {
3082 highbit = fls(udma_mask) - 1;
3083 udma_mask &= ~(1 << highbit);
3084 if (!udma_mask)
3085 return -ENOENT;
3086 } else if (mwdma_mask) {
3087 highbit = fls(mwdma_mask) - 1;
3088 mwdma_mask &= ~(1 << highbit);
3089 if (!mwdma_mask)
3090 return -ENOENT;
3091 }
3092 break;
3093
3094 case ATA_DNXFER_40C:
3095 udma_mask &= ATA_UDMA_MASK_40C;
3096 break;
3097
3098 case ATA_DNXFER_FORCE_PIO0:
3099 pio_mask &= 1;
3100 case ATA_DNXFER_FORCE_PIO:
3101 mwdma_mask = 0;
3102 udma_mask = 0;
3103 break;
3104
458337db
TH
3105 default:
3106 BUG();
3107 }
3108
3109 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3110
3111 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3112 return -ENOENT;
3113
3114 if (!quiet) {
3115 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3116 snprintf(buf, sizeof(buf), "%s:%s",
3117 ata_mode_string(xfer_mask),
3118 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3119 else
3120 snprintf(buf, sizeof(buf), "%s",
3121 ata_mode_string(xfer_mask));
3122
a9a79dfe 3123 ata_dev_warn(dev, "limiting speed to %s\n", buf);
458337db 3124 }
cf176e1a
TH
3125
3126 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3127 &dev->udma_mask);
3128
cf176e1a 3129 return 0;
cf176e1a
TH
3130}
3131
3373efd8 3132static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3133{
d0cb43b3 3134 struct ata_port *ap = dev->link->ap;
9af5c9c9 3135 struct ata_eh_context *ehc = &dev->link->eh_context;
d0cb43b3 3136 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
4055dee7
TH
3137 const char *dev_err_whine = "";
3138 int ign_dev_err = 0;
d0cb43b3 3139 unsigned int err_mask = 0;
83206a29 3140 int rc;
1da177e4 3141
e8384607 3142 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3143 if (dev->xfer_shift == ATA_SHIFT_PIO)
3144 dev->flags |= ATA_DFLAG_PIO;
3145
d0cb43b3
TH
3146 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3147 dev_err_whine = " (SET_XFERMODE skipped)";
3148 else {
3149 if (nosetxfer)
a9a79dfe
JP
3150 ata_dev_warn(dev,
3151 "NOSETXFER but PATA detected - can't "
3152 "skip SETXFER, might malfunction\n");
d0cb43b3
TH
3153 err_mask = ata_dev_set_xfermode(dev);
3154 }
2dcb407e 3155
4055dee7
TH
3156 if (err_mask & ~AC_ERR_DEV)
3157 goto fail;
3158
3159 /* revalidate */
3160 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3161 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3162 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3163 if (rc)
3164 return rc;
3165
b93fda12
AC
3166 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3167 /* Old CFA may refuse this command, which is just fine */
3168 if (ata_id_is_cfa(dev->id))
3169 ign_dev_err = 1;
3170 /* Catch several broken garbage emulations plus some pre
3171 ATA devices */
3172 if (ata_id_major_version(dev->id) == 0 &&
3173 dev->pio_mode <= XFER_PIO_2)
3174 ign_dev_err = 1;
3175 /* Some very old devices and some bad newer ones fail
3176 any kind of SET_XFERMODE request but support PIO0-2
3177 timings and no IORDY */
3178 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3179 ign_dev_err = 1;
3180 }
3acaf94b
AC
3181 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3182 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3183 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3184 dev->dma_mode == XFER_MW_DMA_0 &&
3185 (dev->id[63] >> 8) & 1)
4055dee7 3186 ign_dev_err = 1;
3acaf94b 3187
4055dee7
TH
3188 /* if the device is actually configured correctly, ignore dev err */
3189 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3190 ign_dev_err = 1;
1da177e4 3191
4055dee7
TH
3192 if (err_mask & AC_ERR_DEV) {
3193 if (!ign_dev_err)
3194 goto fail;
3195 else
3196 dev_err_whine = " (device error ignored)";
3197 }
48a8a14f 3198
23e71c3d
TH
3199 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3200 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3201
a9a79dfe
JP
3202 ata_dev_info(dev, "configured for %s%s\n",
3203 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3204 dev_err_whine);
4055dee7 3205
83206a29 3206 return 0;
4055dee7
TH
3207
3208 fail:
a9a79dfe 3209 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
4055dee7 3210 return -EIO;
1da177e4
LT
3211}
3212
1da177e4 3213/**
04351821 3214 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3215 * @link: link on which timings will be programmed
1967b7ff 3216 * @r_failed_dev: out parameter for failed device
1da177e4 3217 *
04351821
A
3218 * Standard implementation of the function used to tune and set
3219 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3220 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3221 * returned in @r_failed_dev.
780a87f7 3222 *
1da177e4 3223 * LOCKING:
0cba632b 3224 * PCI/etc. bus probe sem.
e82cbdb9
TH
3225 *
3226 * RETURNS:
3227 * 0 on success, negative errno otherwise
1da177e4 3228 */
04351821 3229
0260731f 3230int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3231{
0260731f 3232 struct ata_port *ap = link->ap;
e8e0619f 3233 struct ata_device *dev;
f58229f8 3234 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3235
a6d5a51c 3236 /* step 1: calculate xfer_mask */
1eca4365 3237 ata_for_each_dev(dev, link, ENABLED) {
7dc951ae 3238 unsigned long pio_mask, dma_mask;
b3a70601 3239 unsigned int mode_mask;
a6d5a51c 3240
b3a70601
AC
3241 mode_mask = ATA_DMA_MASK_ATA;
3242 if (dev->class == ATA_DEV_ATAPI)
3243 mode_mask = ATA_DMA_MASK_ATAPI;
3244 else if (ata_id_is_cfa(dev->id))
3245 mode_mask = ATA_DMA_MASK_CFA;
3246
3373efd8 3247 ata_dev_xfermask(dev);
33267325 3248 ata_force_xfermask(dev);
1da177e4 3249
acf356b1
TH
3250 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3251 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3252
3253 if (libata_dma_mask & mode_mask)
3254 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3255 else
3256 dma_mask = 0;
3257
acf356b1
TH
3258 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3259 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3260
4f65977d 3261 found = 1;
b15b3eba 3262 if (ata_dma_enabled(dev))
5444a6f4 3263 used_dma = 1;
a6d5a51c 3264 }
4f65977d 3265 if (!found)
e82cbdb9 3266 goto out;
a6d5a51c
TH
3267
3268 /* step 2: always set host PIO timings */
1eca4365 3269 ata_for_each_dev(dev, link, ENABLED) {
70cd071e 3270 if (dev->pio_mode == 0xff) {
a9a79dfe 3271 ata_dev_warn(dev, "no PIO support\n");
e8e0619f 3272 rc = -EINVAL;
e82cbdb9 3273 goto out;
e8e0619f
TH
3274 }
3275
3276 dev->xfer_mode = dev->pio_mode;
3277 dev->xfer_shift = ATA_SHIFT_PIO;
3278 if (ap->ops->set_piomode)
3279 ap->ops->set_piomode(ap, dev);
3280 }
1da177e4 3281
a6d5a51c 3282 /* step 3: set host DMA timings */
1eca4365
TH
3283 ata_for_each_dev(dev, link, ENABLED) {
3284 if (!ata_dma_enabled(dev))
e8e0619f
TH
3285 continue;
3286
3287 dev->xfer_mode = dev->dma_mode;
3288 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3289 if (ap->ops->set_dmamode)
3290 ap->ops->set_dmamode(ap, dev);
3291 }
1da177e4
LT
3292
3293 /* step 4: update devices' xfer mode */
1eca4365 3294 ata_for_each_dev(dev, link, ENABLED) {
3373efd8 3295 rc = ata_dev_set_mode(dev);
5bbc53f4 3296 if (rc)
e82cbdb9 3297 goto out;
83206a29 3298 }
1da177e4 3299
e8e0619f
TH
3300 /* Record simplex status. If we selected DMA then the other
3301 * host channels are not permitted to do so.
5444a6f4 3302 */
cca3974e 3303 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3304 ap->host->simplex_claimed = ap;
5444a6f4 3305
e82cbdb9
TH
3306 out:
3307 if (rc)
3308 *r_failed_dev = dev;
3309 return rc;
1da177e4
LT
3310}
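
/*
 * Sketch (assumption): drivers that need extra chipset work around mode
 * programming typically wrap this helper from their ->set_mode hook.
 * example_set_mode() is hypothetical.
 */
static int example_set_mode(struct ata_link *link,
			    struct ata_device **r_failed_dev)
{
	/* e.g. unlock or save controller timing registers here */
	return ata_do_set_mode(link, r_failed_dev);
}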
3311
aa2731ad
TH
3312/**
3313 * ata_wait_ready - wait for link to become ready
3314 * @link: link to be waited on
3315 * @deadline: deadline jiffies for the operation
3316 * @check_ready: callback to check link readiness
3317 *
3318 * Wait for @link to become ready. @check_ready should return
3319 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3320 * link doesn't seem to be occupied, other errno for other error
3321 * conditions.
3322 *
3323 * Transient -ENODEV conditions are allowed for
3324 * ATA_TMOUT_FF_WAIT.
3325 *
3326 * LOCKING:
3327 * EH context.
3328 *
3329 * RETURNS:
3330 * 0 if @link is ready before @deadline; otherwise, -errno.
3331 */
3332int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3333 int (*check_ready)(struct ata_link *link))
3334{
3335 unsigned long start = jiffies;
b48d58f5 3336 unsigned long nodev_deadline;
aa2731ad
TH
3337 int warned = 0;
3338
b48d58f5
TH
3339 /* choose which 0xff timeout to use, read comment in libata.h */
3340 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3341 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3342 else
3343 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3344
b1c72916
TH
3345 /* Slave readiness can't be tested separately from master. On
3346 * M/S emulation configuration, this function should be called
3347 * only on the master and it will handle both master and slave.
3348 */
3349 WARN_ON(link == link->ap->slave_link);
3350
aa2731ad
TH
3351 if (time_after(nodev_deadline, deadline))
3352 nodev_deadline = deadline;
3353
3354 while (1) {
3355 unsigned long now = jiffies;
3356 int ready, tmp;
3357
3358 ready = tmp = check_ready(link);
3359 if (ready > 0)
3360 return 0;
3361
b48d58f5
TH
3362 /*
3363 * -ENODEV could be transient. Ignore -ENODEV if link
aa2731ad 3364 * is online. Also, some SATA devices take a long
b48d58f5
TH
3365 * time to clear 0xff after reset. Wait for
3366 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3367 * offline.
aa2731ad
TH
3368 *
3369 * Note that some PATA controllers (pata_ali) explode
3370 * if status register is read more than once when
3371 * there's no device attached.
3372 */
3373 if (ready == -ENODEV) {
3374 if (ata_link_online(link))
3375 ready = 0;
3376 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3377 !ata_link_offline(link) &&
3378 time_before(now, nodev_deadline))
3379 ready = 0;
3380 }
3381
3382 if (ready)
3383 return ready;
3384 if (time_after(now, deadline))
3385 return -EBUSY;
3386
3387 if (!warned && time_after(now, start + 5 * HZ) &&
3388 (deadline - now > 3 * HZ)) {
a9a79dfe 3389 ata_link_warn(link,
aa2731ad
TH
3390 "link is slow to respond, please be patient "
3391 "(ready=%d)\n", tmp);
3392 warned = 1;
3393 }
3394
97750ceb 3395 ata_msleep(link->ap, 50);
aa2731ad
TH
3396 }
3397}
3398
3399/**
3400 * ata_wait_after_reset - wait for link to become ready after reset
3401 * @link: link to be waited on
3402 * @deadline: deadline jiffies for the operation
3403 * @check_ready: callback to check link readiness
3404 *
3405 * Wait for @link to become ready after reset.
3406 *
3407 * LOCKING:
3408 * EH context.
3409 *
3410 * RETURNS:
3411 * 0 if @link is ready before @deadline; otherwise, -errno.
3412 */
2b4221bb 3413int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
aa2731ad
TH
3414 int (*check_ready)(struct ata_link *link))
3415{
97750ceb 3416 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
aa2731ad
TH
3417
3418 return ata_wait_ready(link, deadline, check_ready);
3419}
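
/*
 * Sketch (assumption): a custom reset method usually finishes by handing a
 * controller-specific readiness test to ata_wait_after_reset().  Both
 * example_check_ready() and example_softreset() are hypothetical; the SRST
 * sequence and device classification are omitted.
 */
static int example_check_ready(struct ata_link *link)
{
	/* poll controller status: 1 = ready, 0 = busy, -ENODEV = no device */
	return 1;
}

static int example_softreset(struct ata_link *link, unsigned int *classes,
			     unsigned long deadline)
{
	/* ... pulse SRST and fill classes[] here ... */
	return ata_wait_after_reset(link, deadline, example_check_ready);
}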
3420
d7bb4cc7 3421/**
936fd732
TH
3422 * sata_link_debounce - debounce SATA phy status
3423 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3424 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3425 * @deadline: deadline jiffies for the operation
d7bb4cc7 3426 *
1152b261 3427 * Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3428 * holding the same value where DET is not 1 for @duration polled
3429 * every @interval, before @timeout. Timeout constrains the
d4b2bab4
TH
3430 * beginning of the stable state. Because DET gets stuck at 1 on
3431 * some controllers after hot unplugging, this function waits
d7bb4cc7
TH
3432 * until timeout then returns 0 if DET is stable at 1.
3433 *
d4b2bab4
TH
3434 * @timeout is further limited by @deadline. The sooner of the
3435 * two is used.
3436 *
d7bb4cc7
TH
3437 * LOCKING:
3438 * Kernel thread context (may sleep)
3439 *
3440 * RETURNS:
3441 * 0 on success, -errno on failure.
3442 */
936fd732
TH
3443int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3444 unsigned long deadline)
7a7921e8 3445{
341c2c95
TH
3446 unsigned long interval = params[0];
3447 unsigned long duration = params[1];
d4b2bab4 3448 unsigned long last_jiffies, t;
d7bb4cc7
TH
3449 u32 last, cur;
3450 int rc;
3451
341c2c95 3452 t = ata_deadline(jiffies, params[2]);
d4b2bab4
TH
3453 if (time_before(t, deadline))
3454 deadline = t;
3455
936fd732 3456 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3457 return rc;
3458 cur &= 0xf;
3459
3460 last = cur;
3461 last_jiffies = jiffies;
3462
3463 while (1) {
97750ceb 3464 ata_msleep(link->ap, interval);
936fd732 3465 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3466 return rc;
3467 cur &= 0xf;
3468
3469 /* DET stable? */
3470 if (cur == last) {
d4b2bab4 3471 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7 3472 continue;
341c2c95
TH
3473 if (time_after(jiffies,
3474 ata_deadline(last_jiffies, duration)))
d7bb4cc7
TH
3475 return 0;
3476 continue;
3477 }
3478
3479 /* unstable, start over */
3480 last = cur;
3481 last_jiffies = jiffies;
3482
f1545154
TH
3483 /* Check deadline. If debouncing failed, return
3484 * -EPIPE to tell upper layer to lower link speed.
3485 */
d4b2bab4 3486 if (time_after(jiffies, deadline))
f1545154 3487 return -EPIPE;
d7bb4cc7
TH
3488 }
3489}
3490
3491/**
936fd732
TH
3492 * sata_link_resume - resume SATA link
3493 * @link: ATA link to resume SATA
d7bb4cc7 3494 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3495 * @deadline: deadline jiffies for the operation
d7bb4cc7 3496 *
936fd732 3497 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3498 *
3499 * LOCKING:
3500 * Kernel thread context (may sleep)
3501 *
3502 * RETURNS:
3503 * 0 on success, -errno on failure.
3504 */
936fd732
TH
3505int sata_link_resume(struct ata_link *link, const unsigned long *params,
3506 unsigned long deadline)
d7bb4cc7 3507{
5040ab67 3508 int tries = ATA_LINK_RESUME_TRIES;
ac371987 3509 u32 scontrol, serror;
81952c54
TH
3510 int rc;
3511
936fd732 3512 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3513 return rc;
7a7921e8 3514
5040ab67
TH
3515 /*
3516 * Writes to SControl sometimes get ignored under certain
3517 * controllers (ata_piix SIDPR). Make sure DET actually is
3518 * cleared.
3519 */
3520 do {
3521 scontrol = (scontrol & 0x0f0) | 0x300;
3522 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3523 return rc;
3524 /*
3525 * Some PHYs react badly if SStatus is pounded
3526 * immediately after resuming. Delay 200ms before
3527 * debouncing.
3528 */
97750ceb 3529 ata_msleep(link->ap, 200);
81952c54 3530
5040ab67
TH
3531 /* is SControl restored correctly? */
3532 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3533 return rc;
3534 } while ((scontrol & 0xf0f) != 0x300 && --tries);
7a7921e8 3535
5040ab67 3536 if ((scontrol & 0xf0f) != 0x300) {
38941c95 3537 ata_link_warn(link, "failed to resume link (SControl %X)\n",
a9a79dfe 3538 scontrol);
5040ab67
TH
3539 return 0;
3540 }
3541
3542 if (tries < ATA_LINK_RESUME_TRIES)
a9a79dfe
JP
3543 ata_link_warn(link, "link resume succeeded after %d retries\n",
3544 ATA_LINK_RESUME_TRIES - tries);
7a7921e8 3545
ac371987
TH
3546 if ((rc = sata_link_debounce(link, params, deadline)))
3547 return rc;
3548
f046519f 3549 /* clear SError, some PHYs require this even for SRST to work */
ac371987
TH
3550 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3551 rc = sata_scr_write(link, SCR_ERROR, serror);
ac371987 3552
f046519f 3553 return rc != -EINVAL ? rc : 0;
7a7921e8
TH
3554}
3555
1152b261
TH
3556/**
3557 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3558 * @link: ATA link to manipulate SControl for
3559 * @policy: LPM policy to configure
3560 * @spm_wakeup: initiate LPM transition to active state
3561 *
3562 * Manipulate the IPM field of the SControl register of @link
3563 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3564 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3565 * the link. This function also clears PHYRDY_CHG before
3566 * returning.
3567 *
3568 * LOCKING:
3569 * EH context.
3570 *
3571 * RETURNS:
3572 * 0 on success, -errno otherwise.
3573 */
3574int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3575 bool spm_wakeup)
3576{
3577 struct ata_eh_context *ehc = &link->eh_context;
3578 bool woken_up = false;
3579 u32 scontrol;
3580 int rc;
3581
3582 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3583 if (rc)
3584 return rc;
3585
3586 switch (policy) {
3587 case ATA_LPM_MAX_POWER:
3588 /* disable all LPM transitions */
3589 scontrol |= (0x3 << 8);
3590 /* initiate transition to active state */
3591 if (spm_wakeup) {
3592 scontrol |= (0x4 << 12);
3593 woken_up = true;
3594 }
3595 break;
3596 case ATA_LPM_MED_POWER:
3597 /* allow LPM to PARTIAL */
3598 scontrol &= ~(0x1 << 8);
3599 scontrol |= (0x2 << 8);
3600 break;
3601 case ATA_LPM_MIN_POWER:
8a745f1f
KCA
3602 if (ata_link_nr_enabled(link) > 0)
3603 /* no restrictions on LPM transitions */
3604 scontrol &= ~(0x3 << 8);
3605 else {
3606 /* empty port, power off */
3607 scontrol &= ~0xf;
3608 scontrol |= (0x1 << 2);
3609 }
1152b261
TH
3610 break;
3611 default:
3612 WARN_ON(1);
3613 }
3614
3615 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3616 if (rc)
3617 return rc;
3618
3619 /* give the link time to transit out of LPM state */
3620 if (woken_up)
3621 msleep(10);
3622
3623 /* clear PHYRDY_CHG from SError */
3624 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3625 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3626}
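/*
 * For reference, the shifts above follow the SControl field layout
 * assumed by this function (DET in bits 3:0, SPD in 7:4, IPM in 11:8,
 * SPM in 15:12).  Clearing (0x3 << 8) permits both PARTIAL and SLUMBER
 * transitions, while writing (0x4 << 12) requests a transition back to
 * the active state, matching the policy cases handled in the switch.
 */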
3627
f5914a46 3628/**
0aa1113d 3629 * ata_std_prereset - prepare for reset
cc0680a5 3630 * @link: ATA link to be reset
d4b2bab4 3631 * @deadline: deadline jiffies for the operation
f5914a46 3632 *
cc0680a5 3633 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3634 * prereset makes libata abort the whole reset sequence and give up
3635 * that port, so prereset should be best-effort. It does its
3636 * best to prepare for reset sequence but if things go wrong, it
3637 * should just whine, not fail.
f5914a46
TH
3638 *
3639 * LOCKING:
3640 * Kernel thread context (may sleep)
3641 *
3642 * RETURNS:
3643 * 0 on success, -errno otherwise.
3644 */
0aa1113d 3645int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3646{
cc0680a5 3647 struct ata_port *ap = link->ap;
936fd732 3648 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3649 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3650 int rc;
3651
f5914a46
TH
3652 /* if we're about to do hardreset, nothing more to do */
3653 if (ehc->i.action & ATA_EH_HARDRESET)
3654 return 0;
3655
936fd732 3656 /* if SATA, resume link */
a16abc0b 3657 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3658 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3659 /* whine about phy resume failure but proceed */
3660 if (rc && rc != -EOPNOTSUPP)
a9a79dfe
JP
3661 ata_link_warn(link,
3662 "failed to resume link for reset (errno=%d)\n",
3663 rc);
f5914a46
TH
3664 }
3665
45db2f6c 3666 /* no point in trying softreset on offline link */
b1c72916 3667 if (ata_phys_link_offline(link))
45db2f6c
TH
3668 ehc->i.action &= ~ATA_EH_SOFTRESET;
3669
f5914a46
TH
3670 return 0;
3671}
3672
c2bd5804 3673/**
624d5c51
TH
3674 * sata_link_hardreset - reset link via SATA phy reset
3675 * @link: link to reset
3676 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3677 * @deadline: deadline jiffies for the operation
9dadd45b
TH
3678 * @online: optional out parameter indicating link online status
3679 * @check_ready: optional callback to check link readiness
c2bd5804 3680 *
624d5c51 3681 * SATA phy-reset @link using DET bits of SControl register.
9dadd45b
TH
3682 * After hardreset, link readiness is waited upon using
3683 * ata_wait_ready() if @check_ready is specified. LLDs are
3684 * allowed to not specify @check_ready and wait themselves after this
3685 * function returns. Device classification is LLD's
3686 * responsibility.
3687 *
3688 * *@online is set to one iff reset succeeded and @link is online
3689 * after reset.
c2bd5804
TH
3690 *
3691 * LOCKING:
3692 * Kernel thread context (may sleep)
3693 *
3694 * RETURNS:
3695 * 0 on success, -errno otherwise.
3696 */
624d5c51 3697int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
9dadd45b
TH
3698 unsigned long deadline,
3699 bool *online, int (*check_ready)(struct ata_link *))
c2bd5804 3700{
624d5c51 3701 u32 scontrol;
81952c54 3702 int rc;
852ee16a 3703
c2bd5804
TH
3704 DPRINTK("ENTER\n");
3705
9dadd45b
TH
3706 if (online)
3707 *online = false;
3708
936fd732 3709 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3710 /* SATA spec says nothing about how to reconfigure
3711 * spd. To be on the safe side, turn off phy during
3712 * reconfiguration. This works for at least ICH7 AHCI
3713 * and Sil3124.
3714 */
936fd732 3715 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3716 goto out;
81952c54 3717
a34b6fc0 3718 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3719
936fd732 3720 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3721 goto out;
1c3fae4d 3722
936fd732 3723 sata_set_spd(link);
1c3fae4d
TH
3724 }
3725
3726 /* issue phy wake/reset */
936fd732 3727 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3728 goto out;
81952c54 3729
852ee16a 3730 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3731
936fd732 3732 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3733 goto out;
c2bd5804 3734
1c3fae4d 3735 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3736 * 10.4.2 says at least 1 ms.
3737 */
97750ceb 3738 ata_msleep(link->ap, 1);
c2bd5804 3739
936fd732
TH
3740 /* bring link back */
3741 rc = sata_link_resume(link, timing, deadline);
9dadd45b
TH
3742 if (rc)
3743 goto out;
3744 /* if link is offline nothing more to do */
b1c72916 3745 if (ata_phys_link_offline(link))
9dadd45b
TH
3746 goto out;
3747
3748 /* Link is online. From this point, -ENODEV too is an error. */
3749 if (online)
3750 *online = true;
3751
071f44b1 3752 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
9dadd45b
TH
3753 /* If PMP is supported, we have to do follow-up SRST.
3754 * Some PMPs don't send D2H Reg FIS after hardreset if
3755 * the first port is empty. Wait only for
3756 * ATA_TMOUT_PMP_SRST_WAIT.
3757 */
3758 if (check_ready) {
3759 unsigned long pmp_deadline;
3760
341c2c95
TH
3761 pmp_deadline = ata_deadline(jiffies,
3762 ATA_TMOUT_PMP_SRST_WAIT);
9dadd45b
TH
3763 if (time_after(pmp_deadline, deadline))
3764 pmp_deadline = deadline;
3765 ata_wait_ready(link, pmp_deadline, check_ready);
3766 }
3767 rc = -EAGAIN;
3768 goto out;
3769 }
3770
3771 rc = 0;
3772 if (check_ready)
3773 rc = ata_wait_ready(link, deadline, check_ready);
b6103f6d 3774 out:
0cbf0711
TH
3775 if (rc && rc != -EAGAIN) {
3776 /* online is set iff link is online && reset succeeded */
3777 if (online)
3778 *online = false;
a9a79dfe 3779 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
0cbf0711 3780 }
b6103f6d
TH
3781 DPRINTK("EXIT, rc=%d\n", rc);
3782 return rc;
3783}
3784
57c9efdf
TH
3785/**
3786 * sata_std_hardreset - COMRESET w/o waiting or classification
3787 * @link: link to reset
3788 * @class: resulting class of attached device
3789 * @deadline: deadline jiffies for the operation
3790 *
3791 * Standard SATA COMRESET w/o waiting or classification.
3792 *
3793 * LOCKING:
3794 * Kernel thread context (may sleep)
3795 *
3796 * RETURNS:
3797 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3798 */
3799int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3800 unsigned long deadline)
3801{
3802 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3803 bool online;
3804 int rc;
3805
3806 /* do hardreset */
3807 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3808 return online ? -EAGAIN : rc;
3809}
3810
c2bd5804 3811/**
203c75b8 3812 * ata_std_postreset - standard postreset callback
cc0680a5 3813 * @link: the target ata_link
c2bd5804
TH
3814 * @classes: classes of attached devices
3815 *
3816 * This function is invoked after a successful reset. Note that
3817 * the device might have been reset more than once using
3818 * different reset methods before postreset is invoked.
c2bd5804 3819 *
c2bd5804
TH
3820 * LOCKING:
3821 * Kernel thread context (may sleep)
3822 */
203c75b8 3823void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3824{
f046519f
TH
3825 u32 serror;
3826
c2bd5804
TH
3827 DPRINTK("ENTER\n");
3828
f046519f
TH
3829 /* reset complete, clear SError */
3830 if (!sata_scr_read(link, SCR_ERROR, &serror))
3831 sata_scr_write(link, SCR_ERROR, serror);
3832
c2bd5804 3833 /* print link status */
936fd732 3834 sata_print_link_status(link);
c2bd5804 3835
c2bd5804
TH
3836 DPRINTK("EXIT\n");
3837}
3838
623a3128
TH
3839/**
3840 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3841 * @dev: device to compare against
3842 * @new_class: class of the new device
3843 * @new_id: IDENTIFY page of the new device
3844 *
3845 * Compare @new_class and @new_id against @dev and determine
3846 * whether @dev is the device indicated by @new_class and
3847 * @new_id.
3848 *
3849 * LOCKING:
3850 * None.
3851 *
3852 * RETURNS:
3853 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3854 */
3373efd8
TH
3855static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3856 const u16 *new_id)
623a3128
TH
3857{
3858 const u16 *old_id = dev->id;
a0cf733b
TH
3859 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3860 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3861
3862 if (dev->class != new_class) {
a9a79dfe
JP
3863 ata_dev_info(dev, "class mismatch %d != %d\n",
3864 dev->class, new_class);
623a3128
TH
3865 return 0;
3866 }
3867
a0cf733b
TH
3868 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3869 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3870 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3871 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3872
3873 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
3874 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3875 model[0], model[1]);
623a3128
TH
3876 return 0;
3877 }
3878
3879 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
3880 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3881 serial[0], serial[1]);
623a3128
TH
3882 return 0;
3883 }
3884
623a3128
TH
3885 return 1;
3886}
3887
3888/**
fe30911b 3889 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3890 * @dev: target ATA device
bff04647 3891 * @readid_flags: read ID flags
623a3128
TH
3892 *
3893 * Re-read IDENTIFY page and make sure @dev is still attached to
3894 * the port.
3895 *
3896 * LOCKING:
3897 * Kernel thread context (may sleep)
3898 *
3899 * RETURNS:
3900 * 0 on success, negative errno otherwise
3901 */
fe30911b 3902int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3903{
5eb45c02 3904 unsigned int class = dev->class;
9af5c9c9 3905 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3906 int rc;
3907
fe635c7e 3908 /* read ID data */
bff04647 3909 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3910 if (rc)
fe30911b 3911 return rc;
623a3128
TH
3912
3913 /* is the device still there? */
fe30911b
TH
3914 if (!ata_dev_same_device(dev, class, id))
3915 return -ENODEV;
623a3128 3916
fe635c7e 3917 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3918 return 0;
3919}
3920
3921/**
3922 * ata_dev_revalidate - Revalidate ATA device
3923 * @dev: device to revalidate
422c9daa 3924 * @new_class: new class code
fe30911b
TH
3925 * @readid_flags: read ID flags
3926 *
3927 * Re-read IDENTIFY page, make sure @dev is still attached to the
3928 * port and reconfigure it according to the new IDENTIFY page.
3929 *
3930 * LOCKING:
3931 * Kernel thread context (may sleep)
3932 *
3933 * RETURNS:
3934 * 0 on success, negative errno otherwise
3935 */
422c9daa
TH
3936int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3937 unsigned int readid_flags)
fe30911b 3938{
6ddcd3b0 3939 u64 n_sectors = dev->n_sectors;
5920dadf 3940 u64 n_native_sectors = dev->n_native_sectors;
fe30911b
TH
3941 int rc;
3942
3943 if (!ata_dev_enabled(dev))
3944 return -ENODEV;
3945
422c9daa
TH
3946 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3947 if (ata_class_enabled(new_class) &&
f0d0613d
BP
3948 new_class != ATA_DEV_ATA &&
3949 new_class != ATA_DEV_ATAPI &&
3950 new_class != ATA_DEV_SEMB) {
a9a79dfe
JP
3951 ata_dev_info(dev, "class mismatch %u != %u\n",
3952 dev->class, new_class);
422c9daa
TH
3953 rc = -ENODEV;
3954 goto fail;
3955 }
3956
fe30911b
TH
3957 /* re-read ID */
3958 rc = ata_dev_reread_id(dev, readid_flags);
3959 if (rc)
3960 goto fail;
623a3128
TH
3961
3962 /* configure device according to the new ID */
efdaedc4 3963 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3964 if (rc)
3965 goto fail;
3966
3967 /* verify n_sectors hasn't changed */
445d211b
TH
3968 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3969 dev->n_sectors == n_sectors)
3970 return 0;
3971
3972 /* n_sectors has changed */
a9a79dfe
JP
3973 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3974 (unsigned long long)n_sectors,
3975 (unsigned long long)dev->n_sectors);
445d211b
TH
3976
3977 /*
3978 * Something could have caused HPA to be unlocked
3979 * involuntarily. If n_native_sectors hasn't changed and the
3980 * new size matches it, keep the device.
3981 */
3982 if (dev->n_native_sectors == n_native_sectors &&
3983 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
a9a79dfe
JP
3984 ata_dev_warn(dev,
3985 "new n_sectors matches native, probably "
3986 "late HPA unlock, n_sectors updated\n");
68939ce5 3987 /* use the larger n_sectors */
445d211b 3988 return 0;
6ddcd3b0
TH
3989 }
3990
445d211b
TH
3991 /*
3992 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
3993 * unlocking HPA in those cases.
3994 *
3995 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3996 */
3997 if (dev->n_native_sectors == n_native_sectors &&
3998 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3999 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
a9a79dfe
JP
4000 ata_dev_warn(dev,
4001 "old n_sectors matches native, probably "
4002 "late HPA lock, will try to unlock HPA\n");
445d211b
TH
4003 /* try unlocking HPA */
4004 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4005 rc = -EIO;
4006 } else
4007 rc = -ENODEV;
623a3128 4008
445d211b
TH
4009 /* restore original n_[native_]sectors and fail */
4010 dev->n_native_sectors = n_native_sectors;
4011 dev->n_sectors = n_sectors;
623a3128 4012 fail:
a9a79dfe 4013 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4014 return rc;
4015}
4016
6919a0a6
AC
4017struct ata_blacklist_entry {
4018 const char *model_num;
4019 const char *model_rev;
4020 unsigned long horkage;
4021};
4022
4023static const struct ata_blacklist_entry ata_device_blacklist [] = {
4024 /* Devices with DMA related problems under Linux */
4025 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4026 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4027 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4028 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4029 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4030 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4031 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4032 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4033 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
7da4c935 4034 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4035 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4036 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4037 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4038 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4039 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
7da4c935 4040 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4041 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4042 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4043 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4044 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4045 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4046 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4047 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4048 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4049 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4050 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4051 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4052 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a 4053 /* Odd clown on sil3726/4726 PMPs */
50af2fa1 4054 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
6919a0a6 4055
18d6e9d5 4056 /* Weird ATAPI devices */
40a1d531 4057 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
6a87e42e 4058 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
18d6e9d5 4059
6919a0a6
AC
4060 /* Devices we expect to fail diagnostics */
4061
4062 /* Devices where NCQ should be avoided */
4063 /* NCQ is slow */
2dcb407e 4064 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4065 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4066 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4067 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4068 /* NCQ is broken */
539cc7c7 4069 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4070 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4071 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4072 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
5ccfca97 4073 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
539cc7c7 4074
ac70a964 4075 /* Seagate NCQ + FLUSH CACHE firmware bug */
4d1f9082 4076 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964 4077 ATA_HORKAGE_FIRMWARE_WARN },
d10d491f 4078
4d1f9082 4079 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
4080 ATA_HORKAGE_FIRMWARE_WARN },
4081
4d1f9082 4082 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
4083 ATA_HORKAGE_FIRMWARE_WARN },
4084
4d1f9082 4085 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964
TH
4086 ATA_HORKAGE_FIRMWARE_WARN },
4087
36e337d0
RH
4088 /* Blacklist entries taken from Silicon Image 3124/3132
4089 Windows driver .inf file - also several Linux problem reports */
4090 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4091 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4092 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4093
68b0ddb2
TH
4094 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4095 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4096
16c55b03
TH
4097 /* devices which puke on READ_NATIVE_MAX */
4098 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4099 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4100 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4101 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4102
7831387b
TH
4103 /* this one allows HPA unlocking but fails IOs on the area */
4104 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4105
93328e11
AC
4106 /* Devices which report 1 sector over size HPA */
4107 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4108 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4109 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4110
6bbfd53d
AC
4111 /* Devices which get the IVB wrong */
4112 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
a79067e5 4113 /* Maybe we should just blacklist TSSTcorp... */
7da4c935 4114 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
6bbfd53d 4115
9ce8e307
JA
4116 /* Devices that do not need bridging limits applied */
4117 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4118
9062712f
TH
4119 /* Devices which aren't very happy with higher link speeds */
4120 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4121
d0cb43b3
TH
4122 /*
4123 * Devices which choke on SETXFER. Applies only if both the
4124 * device and controller are SATA.
4125 */
cd691876
TH
4126 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4127 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4128 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
d0cb43b3 4129
6919a0a6
AC
4130 /* End Marker */
4131 { }
1da177e4 4132};
2e9edbf8 4133
bce036ce
ML
4134/**
4135 * glob_match - match a text string against a glob-style pattern
4136 * @text: the string to be examined
4137 * @pattern: the glob-style pattern to be matched against
4138 *
4139 * Either/both of text and pattern can be empty strings.
4140 *
4141 * Match text against a glob-style pattern, with wildcards and simple sets:
4142 *
4143 * ? matches any single character.
4144 * * matches any run of characters.
4145 * [xyz] matches a single character from the set: x, y, or z.
2f9e4d16
ML
4146 * [a-d] matches a single character from the range: a, b, c, or d.
4147 * [a-d0-9] matches a single character from either range.
bce036ce 4148 *
2f9e4d16
ML
4149 * The special characters ?, [, -, or * can be matched using a set, e.g. [*]
4150 * Behaviour with malformed patterns is undefined, though generally reasonable.
bce036ce 4151 *
3d2be54b 4152 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
bce036ce
ML
4153 *
4154 * This function uses one level of recursion per '*' in pattern.
4155 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4156 * this will not cause stack problems for any reasonable use here.
4157 *
4158 * RETURNS:
4159 * 0 on match, 1 otherwise.
4160 */
4161static int glob_match (const char *text, const char *pattern)
539cc7c7 4162{
bce036ce
ML
4163 do {
4164 /* Match single character or a '?' wildcard */
4165 if (*text == *pattern || *pattern == '?') {
4166 if (!*pattern++)
4167 return 0; /* End of both strings: match */
4168 } else {
4169 /* Match single char against a '[' bracketed ']' pattern set */
4170 if (!*text || *pattern != '[')
4171 break; /* Not a pattern set */
2f9e4d16
ML
4172 while (*++pattern && *pattern != ']' && *text != *pattern) {
4173 if (*pattern == '-' && *(pattern - 1) != '[')
4174 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4175 ++pattern;
4176 break;
4177 }
4178 }
bce036ce
ML
4179 if (!*pattern || *pattern == ']')
4180 return 1; /* No match */
4181 while (*pattern && *pattern++ != ']');
4182 }
4183 } while (*++text && *pattern);
4184
4185 /* Match any run of chars against a '*' wildcard */
4186 if (*pattern == '*') {
4187 if (!*++pattern)
4188 return 0; /* Match: avoid recursion at end of pattern */
4189 /* Loop to handle additional pattern chars after the wildcard */
4190 while (*text) {
4191 if (glob_match(text, pattern) == 0)
4192 return 0; /* Remainder matched */
4193 ++text; /* Absorb (match) this char and try again */
317b50b8
AP
4194 }
4195 }
bce036ce
ML
4196 if (!*text && !*pattern)
4197 return 0; /* End of both strings: match */
4198 return 1; /* No match */
539cc7c7 4199}
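/*
 * Sample results against the blacklist-style patterns documented above
 * (0 means "matched"):
 *
 *	glob_match("ST31500341AS", "ST31500341AS") == 0	exact match
 *	glob_match("SD15",         "SD1[5-9]")     == 0	range in a set
 *	glob_match("Maxtor 6L080", "Maxtor *")     == 0	'*' wildcard
 *	glob_match("SD14",         "SD1[5-9]")     == 1	no match
 */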
4fca377f 4200
75683fe7 4201static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4202{
8bfa79fc
TH
4203 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4204 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4205 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4206
8bfa79fc
TH
4207 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4208 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4209
6919a0a6 4210 while (ad->model_num) {
bce036ce 4211 if (!glob_match(model_num, ad->model_num)) {
6919a0a6
AC
4212 if (ad->model_rev == NULL)
4213 return ad->horkage;
bce036ce 4214 if (!glob_match(model_rev, ad->model_rev))
6919a0a6 4215 return ad->horkage;
f4b15fef 4216 }
6919a0a6 4217 ad++;
f4b15fef 4218 }
1da177e4
LT
4219 return 0;
4220}
4221
6919a0a6
AC
4222static int ata_dma_blacklisted(const struct ata_device *dev)
4223{
4224 /* We don't support polling DMA.
4225 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
4226 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4227 */
9af5c9c9 4228 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4229 (dev->flags & ATA_DFLAG_CDB_INTR))
4230 return 1;
75683fe7 4231 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4232}
4233
6bbfd53d
AC
4234/**
4235 * ata_is_40wire - check drive side detection
4236 * @dev: device
4237 *
4238 * Perform drive side detection decoding, allowing for device vendors
4239 * who can't follow the documentation.
4240 */
4241
4242static int ata_is_40wire(struct ata_device *dev)
4243{
4244 if (dev->horkage & ATA_HORKAGE_IVB)
4245 return ata_drive_40wire_relaxed(dev->id);
4246 return ata_drive_40wire(dev->id);
4247}
4248
15a5551c
AC
4249/**
4250 * cable_is_40wire - 40/80/SATA decider
4251 * @ap: port to consider
4252 *
4253 * This function encapsulates the policy for speed management
4254 * in one place. At the moment we don't cache the result but
4255 * there is a good case for setting ap->cbl to the result when
4256 * we are called with unknown cables (and figuring out if it
4257 * impacts hotplug at all).
4258 *
4259 * Return 1 if the cable appears to be 40 wire.
4260 */
4261
4262static int cable_is_40wire(struct ata_port *ap)
4263{
4264 struct ata_link *link;
4265 struct ata_device *dev;
4266
4a9c7b33 4267 /* If the controller thinks we are 40 wire, we are. */
15a5551c
AC
4268 if (ap->cbl == ATA_CBL_PATA40)
4269 return 1;
4a9c7b33
TH
4270
4271 /* If the controller thinks we are 80 wire, we are. */
15a5551c
AC
4272 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4273 return 0;
4a9c7b33
TH
4274
4275 /* If the system is known to be 40 wire short cable (e.g.
4276 * laptop), then we allow 80 wire modes even if the drive
4277 * isn't sure.
4278 */
f792068e
AC
4279 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4280 return 0;
4a9c7b33
TH
4281
4282 /* If the controller doesn't know, we scan.
4283 *
4284 * Note: We look for all 40 wire detects at this point. Any
4285 * 80 wire detect is taken to be 80 wire cable because
4286 * - in many setups only the one drive (slave if present) will
4287 * give a valid detect
4288 * - if you have a non detect capable drive you don't want it
4289 * to colour the choice
4290 */
1eca4365
TH
4291 ata_for_each_link(link, ap, EDGE) {
4292 ata_for_each_dev(dev, link, ENABLED) {
4293 if (!ata_is_40wire(dev))
15a5551c
AC
4294 return 0;
4295 }
4296 }
4297 return 1;
4298}
4299
a6d5a51c
TH
4300/**
4301 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4302 * @dev: Device to compute xfermask for
4303 *
acf356b1
TH
4304 * Compute supported xfermask of @dev and store it in
4305 * dev->*_mask. This function is responsible for applying all
4306 * known limits including host controller limits, device
4307 * blacklist, etc...
a6d5a51c
TH
4308 *
4309 * LOCKING:
4310 * None.
a6d5a51c 4311 */
3373efd8 4312static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4313{
9af5c9c9
TH
4314 struct ata_link *link = dev->link;
4315 struct ata_port *ap = link->ap;
cca3974e 4316 struct ata_host *host = ap->host;
a6d5a51c 4317 unsigned long xfer_mask;
1da177e4 4318
37deecb5 4319 /* controller modes available */
565083e1
TH
4320 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4321 ap->mwdma_mask, ap->udma_mask);
4322
8343f889 4323 /* drive modes available */
37deecb5
TH
4324 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4325 dev->mwdma_mask, dev->udma_mask);
4326 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4327
b352e57d
AC
4328 /*
4329 * CFA Advanced TrueIDE timings are not allowed on a shared
4330 * cable
4331 */
4332 if (ata_dev_pair(dev)) {
4333 /* No PIO5 or PIO6 */
4334 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4335 /* No MWDMA3 or MWDMA 4 */
4336 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4337 }
4338
37deecb5
TH
4339 if (ata_dma_blacklisted(dev)) {
4340 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
a9a79dfe
JP
4341 ata_dev_warn(dev,
4342 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4343 }
a6d5a51c 4344
14d66ab7 4345 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4346 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5 4347 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
a9a79dfe
JP
4348 ata_dev_warn(dev,
4349 "simplex DMA is claimed by other device, disabling DMA\n");
5444a6f4 4350 }
565083e1 4351
e424675f
JG
4352 if (ap->flags & ATA_FLAG_NO_IORDY)
4353 xfer_mask &= ata_pio_mask_no_iordy(dev);
4354
5444a6f4 4355 if (ap->ops->mode_filter)
a76b62ca 4356 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4357
8343f889
RH
4358 /* Apply cable rule here. Don't apply it early because when
4359 * we handle hot plug the cable type can itself change.
4360 * Check this last so that we know if the transfer rate was
4361 * solely limited by the cable.
4362 * Unknown or 80 wire cables reported host side are checked
4363 * drive side as well. Cases where we know a 40wire cable
4364 * is used safely for 80 are not checked here.
4365 */
4366 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4367 /* UDMA/44 or higher would be available */
15a5551c 4368 if (cable_is_40wire(ap)) {
a9a79dfe
JP
4369 ata_dev_warn(dev,
4370 "limited to UDMA/33 due to 40-wire cable\n");
8343f889
RH
4371 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4372 }
4373
565083e1
TH
4374 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4375 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4376}
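/*
 * For reference, the xfermask handled above packs the PIO, MWDMA and
 * UDMA capability bits into one unsigned long:
 *
 *	xfer_mask = ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
 *	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
 *
 * which is why cable and blacklist limits reduce to simple bit
 * operations such as "xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA)".
 */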
4377
1da177e4
LT
4378/**
4379 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4380 * @dev: Device to which command will be sent
4381 *
780a87f7
JG
4382 * Issue SET FEATURES - XFER MODE command to device @dev
4383 * on port @ap.
4384 *
1da177e4 4385 * LOCKING:
0cba632b 4386 * PCI/etc. bus probe sem.
83206a29
TH
4387 *
4388 * RETURNS:
4389 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4390 */
4391
3373efd8 4392static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4393{
a0123703 4394 struct ata_taskfile tf;
83206a29 4395 unsigned int err_mask;
1da177e4
LT
4396
4397 /* set up set-features taskfile */
4398 DPRINTK("set features - xfer mode\n");
4399
464cf177
TH
4400 /* Some controllers and ATAPI devices show flaky interrupt
4401 * behavior after setting xfer mode. Use polling instead.
4402 */
3373efd8 4403 ata_tf_init(dev, &tf);
a0123703
TH
4404 tf.command = ATA_CMD_SET_FEATURES;
4405 tf.feature = SETFEATURES_XFER;
464cf177 4406 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4407 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4408 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4409 if (ata_pio_need_iordy(dev))
4410 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4411 /* If the device has IORDY and the controller does not - turn it off */
4412 else if (ata_id_has_iordy(dev->id))
11b7becc 4413 tf.nsect = 0x01;
b9f8ab2d
AC
4414 else /* In the ancient relic department - skip all of this */
4415 return 0;
1da177e4 4416
2b789108 4417 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4418
4419 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4420 return err_mask;
4421}
1152b261 4422
9f45cbd3 4423/**
218f3d30 4424 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4425 * @dev: Device to which command will be sent
4426 * @enable: Whether to enable or disable the feature
218f3d30 4427 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4428 *
4429 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4430 * on port @ap, with the sector count set to @feature.
9f45cbd3
KCA
4431 *
4432 * LOCKING:
4433 * PCI/etc. bus probe sem.
4434 *
4435 * RETURNS:
4436 * 0 on success, AC_ERR_* mask otherwise.
4437 */
1152b261 4438unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
9f45cbd3
KCA
4439{
4440 struct ata_taskfile tf;
4441 unsigned int err_mask;
4442
4443 /* set up set-features taskfile */
4444 DPRINTK("set features - SATA features\n");
4445
4446 ata_tf_init(dev, &tf);
4447 tf.command = ATA_CMD_SET_FEATURES;
4448 tf.feature = enable;
4449 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4450 tf.protocol = ATA_PROT_NODATA;
218f3d30 4451 tf.nsect = feature;
9f45cbd3 4452
2b789108 4453 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4454
83206a29
TH
4455 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4456 return err_mask;
1da177e4
LT
4457}
4458
8bf62ece
AL
4459/**
4460 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4461 * @dev: Device to which command will be sent
e2a7f77a
RD
4462 * @heads: Number of heads (taskfile parameter)
4463 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4464 *
4465 * LOCKING:
6aff8f1f
TH
4466 * Kernel thread context (may sleep)
4467 *
4468 * RETURNS:
4469 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4470 */
3373efd8
TH
4471static unsigned int ata_dev_init_params(struct ata_device *dev,
4472 u16 heads, u16 sectors)
8bf62ece 4473{
a0123703 4474 struct ata_taskfile tf;
6aff8f1f 4475 unsigned int err_mask;
8bf62ece
AL
4476
4477 /* Number of sectors per track 1-255. Number of heads 1-16 */
4478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4479 return AC_ERR_INVALID;
8bf62ece
AL
4480
4481 /* set up init dev params taskfile */
4482 DPRINTK("init dev params \n");
4483
3373efd8 4484 ata_tf_init(dev, &tf);
a0123703
TH
4485 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4486 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4487 tf.protocol = ATA_PROT_NODATA;
4488 tf.nsect = sectors;
4489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4490
2b789108 4491 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4492 /* A clean abort indicates an original or just-out-of-spec drive,
4493 and we should continue, issuing the setup based on the
4494 drive-reported working geometry */
4495 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4496 err_mask = 0;
8bf62ece 4497
6aff8f1f
TH
4498 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4499 return err_mask;
8bf62ece
AL
4500}
4501
1da177e4 4502/**
0cba632b
JG
4503 * ata_sg_clean - Unmap DMA memory associated with command
4504 * @qc: Command containing DMA memory to be released
4505 *
4506 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4507 *
4508 * LOCKING:
cca3974e 4509 * spin_lock_irqsave(host lock)
1da177e4 4510 */
70e6ad0c 4511void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4512{
4513 struct ata_port *ap = qc->ap;
ff2aeb1e 4514 struct scatterlist *sg = qc->sg;
1da177e4
LT
4515 int dir = qc->dma_dir;
4516
efcb3cf7 4517 WARN_ON_ONCE(sg == NULL);
1da177e4 4518
dde20207 4519 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4520
dde20207 4521 if (qc->n_elem)
5825627c 4522 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
1da177e4
LT
4523
4524 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4525 qc->sg = NULL;
1da177e4
LT
4526}
4527
1da177e4 4528/**
5895ef9a 4529 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4530 * @qc: Metadata associated with taskfile to check
4531 *
780a87f7
JG
4532 * Allow low-level driver to filter ATA PACKET commands, returning
4533 * a status indicating whether or not it is OK to use DMA for the
4534 * supplied PACKET command.
4535 *
1da177e4 4536 * LOCKING:
624d5c51
TH
4537 * spin_lock_irqsave(host lock)
4538 *
4539 * RETURNS: 0 when ATAPI DMA can be used
4540 * nonzero otherwise
4541 */
5895ef9a 4542int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4543{
4544 struct ata_port *ap = qc->ap;
71601958 4545
624d5c51
TH
4546 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4547 * few ATAPI devices choke on such DMA requests.
4548 */
6a87e42e
TH
4549 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4550 unlikely(qc->nbytes & 15))
624d5c51 4551 return 1;
e2cec771 4552
624d5c51
TH
4553 if (ap->ops->check_atapi_dma)
4554 return ap->ops->check_atapi_dma(qc);
e2cec771 4555
624d5c51
TH
4556 return 0;
4557}
1da177e4 4558
624d5c51
TH
4559/**
4560 * ata_std_qc_defer - Check whether a qc needs to be deferred
4561 * @qc: ATA command in question
4562 *
4563 * Non-NCQ commands cannot run with any other command, NCQ or
4564 * not. As the upper layer only knows the queue depth, we are
4565 * responsible for maintaining exclusion. This function checks
4566 * whether a new command @qc can be issued.
4567 *
4568 * LOCKING:
4569 * spin_lock_irqsave(host lock)
4570 *
4571 * RETURNS:
4572 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4573 */
4574int ata_std_qc_defer(struct ata_queued_cmd *qc)
4575{
4576 struct ata_link *link = qc->dev->link;
e2cec771 4577
624d5c51
TH
4578 if (qc->tf.protocol == ATA_PROT_NCQ) {
4579 if (!ata_tag_valid(link->active_tag))
4580 return 0;
4581 } else {
4582 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4583 return 0;
4584 }
e2cec771 4585
624d5c51
TH
4586 return ATA_DEFER_LINK;
4587}
6912ccd5 4588
624d5c51 4589void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4590
624d5c51
TH
4591/**
4592 * ata_sg_init - Associate command with scatter-gather table.
4593 * @qc: Command to be associated
4594 * @sg: Scatter-gather table.
4595 * @n_elem: Number of elements in s/g table.
4596 *
4597 * Initialize the data-related elements of queued_cmd @qc
4598 * to point to a scatter-gather table @sg, containing @n_elem
4599 * elements.
4600 *
4601 * LOCKING:
4602 * spin_lock_irqsave(host lock)
4603 */
4604void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4605 unsigned int n_elem)
4606{
4607 qc->sg = sg;
4608 qc->n_elem = n_elem;
4609 qc->cursg = qc->sg;
4610}
bb5cb290 4611
624d5c51
TH
4612/**
4613 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4614 * @qc: Command with scatter-gather table to be mapped.
4615 *
4616 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4617 *
4618 * LOCKING:
4619 * spin_lock_irqsave(host lock)
4620 *
4621 * RETURNS:
4622 * Zero on success, negative on error.
4623 *
4624 */
4625static int ata_sg_setup(struct ata_queued_cmd *qc)
4626{
4627 struct ata_port *ap = qc->ap;
4628 unsigned int n_elem;
1da177e4 4629
624d5c51 4630 VPRINTK("ENTER, ata%u\n", ap->print_id);
e2cec771 4631
624d5c51
TH
4632 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4633 if (n_elem < 1)
4634 return -1;
bb5cb290 4635
624d5c51 4636 DPRINTK("%d sg elements mapped\n", n_elem);
5825627c 4637 qc->orig_n_elem = qc->n_elem;
624d5c51
TH
4638 qc->n_elem = n_elem;
4639 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4640
624d5c51 4641 return 0;
1da177e4
LT
4642}
4643
624d5c51
TH
4644/**
4645 * swap_buf_le16 - swap halves of 16-bit words in place
4646 * @buf: Buffer to swap
4647 * @buf_words: Number of 16-bit words in buffer.
4648 *
4649 * Swap halves of 16-bit words if needed to convert from
4650 * little-endian byte order to native cpu byte order, or
4651 * vice-versa.
4652 *
4653 * LOCKING:
4654 * Inherited from caller.
4655 */
4656void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4657{
624d5c51
TH
4658#ifdef __BIG_ENDIAN
4659 unsigned int i;
8061f5f0 4660
624d5c51
TH
4661 for (i = 0; i < buf_words; i++)
4662 buf[i] = le16_to_cpu(buf[i]);
4663#endif /* __BIG_ENDIAN */
8061f5f0
TH
4664}
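/*
 * Typical use: IDENTIFY data arrives as 256 little-endian 16-bit words,
 * so callers on big-endian hosts convert the raw buffer in place before
 * parsing it, e.g.
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */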
4665
8a8bc223
TH
4666/**
4667 * ata_qc_new - Request an available ATA command, for queueing
5eb66fe0 4668 * @ap: target port
8a8bc223
TH
4669 *
4670 * LOCKING:
4671 * None.
4672 */
4673
4674static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4675{
4676 struct ata_queued_cmd *qc = NULL;
4677 unsigned int i;
4678
4679 /* no command while frozen */
4680 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4681 return NULL;
4682
4683 /* the last tag is reserved for internal command. */
4684 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4685 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4686 qc = __ata_qc_from_tag(ap, i);
4687 break;
4688 }
4689
4690 if (qc)
4691 qc->tag = i;
4692
4693 return qc;
4694}
4695
1da177e4
LT
4696/**
4697 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4698 * @dev: Device from whom we request an available command structure
4699 *
4700 * LOCKING:
0cba632b 4701 * None.
1da177e4
LT
4702 */
4703
8a8bc223 4704struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4705{
9af5c9c9 4706 struct ata_port *ap = dev->link->ap;
1da177e4
LT
4707 struct ata_queued_cmd *qc;
4708
8a8bc223 4709 qc = ata_qc_new(ap);
1da177e4 4710 if (qc) {
1da177e4
LT
4711 qc->scsicmd = NULL;
4712 qc->ap = ap;
4713 qc->dev = dev;
1da177e4 4714
2c13b7ce 4715 ata_qc_reinit(qc);
1da177e4
LT
4716 }
4717
4718 return qc;
4719}
4720
8a8bc223
TH
4721/**
4722 * ata_qc_free - free unused ata_queued_cmd
4723 * @qc: Command to complete
4724 *
4725 * Designed to free unused ata_queued_cmd object
4726 * in case something prevents using it.
4727 *
4728 * LOCKING:
4729 * spin_lock_irqsave(host lock)
4730 */
4731void ata_qc_free(struct ata_queued_cmd *qc)
4732{
a1104016 4733 struct ata_port *ap;
8a8bc223
TH
4734 unsigned int tag;
4735
efcb3cf7 4736 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
a1104016 4737 ap = qc->ap;
8a8bc223
TH
4738
4739 qc->flags = 0;
4740 tag = qc->tag;
4741 if (likely(ata_tag_valid(tag))) {
4742 qc->tag = ATA_TAG_POISON;
4743 clear_bit(tag, &ap->qc_allocated);
4744 }
4745}
4746
76014427 4747void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4748{
a1104016
JL
4749 struct ata_port *ap;
4750 struct ata_link *link;
dedaf2b0 4751
efcb3cf7
TH
4752 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4753 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
a1104016
JL
4754 ap = qc->ap;
4755 link = qc->dev->link;
1da177e4
LT
4756
4757 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4758 ata_sg_clean(qc);
4759
7401abf2 4760 /* command should be marked inactive atomically with qc completion */
da917d69 4761 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 4762 link->sactive &= ~(1 << qc->tag);
da917d69
TH
4763 if (!link->sactive)
4764 ap->nr_active_links--;
4765 } else {
9af5c9c9 4766 link->active_tag = ATA_TAG_POISON;
da917d69
TH
4767 ap->nr_active_links--;
4768 }
4769
4770 /* clear exclusive status */
4771 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4772 ap->excl_link == link))
4773 ap->excl_link = NULL;
7401abf2 4774
3f3791d3
AL
4775 /* atapi: mark qc as inactive to prevent the interrupt handler
4776 * from completing the command twice later, before the error handler
4777 * is called. (when rc != 0 and atapi request sense is needed)
4778 */
4779 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4780 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4781
1da177e4 4782 /* call completion callback */
77853bf2 4783 qc->complete_fn(qc);
1da177e4
LT
4784}
4785
39599a53
TH
4786static void fill_result_tf(struct ata_queued_cmd *qc)
4787{
4788 struct ata_port *ap = qc->ap;
4789
39599a53 4790 qc->result_tf.flags = qc->tf.flags;
22183bf5 4791 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4792}
4793
00115e0f
TH
4794static void ata_verify_xfer(struct ata_queued_cmd *qc)
4795{
4796 struct ata_device *dev = qc->dev;
4797
00115e0f
TH
4798 if (ata_is_nodata(qc->tf.protocol))
4799 return;
4800
4801 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4802 return;
4803
4804 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4805}
4806
f686bcb8
TH
4807/**
4808 * ata_qc_complete - Complete an active ATA command
4809 * @qc: Command to complete
f686bcb8 4810 *
1aadf5c3
TH
4811 * Indicate to the mid and upper layers that an ATA command has
4812 * completed, with either an ok or not-ok status.
4813 *
4814 * Refrain from calling this function multiple times when
4815 * successfully completing multiple NCQ commands.
4816 * ata_qc_complete_multiple() should be used instead, which will
4817 * properly update IRQ expect state.
f686bcb8
TH
4818 *
4819 * LOCKING:
cca3974e 4820 * spin_lock_irqsave(host lock)
f686bcb8
TH
4821 */
4822void ata_qc_complete(struct ata_queued_cmd *qc)
4823{
4824 struct ata_port *ap = qc->ap;
4825
4826 /* XXX: New EH and old EH use different mechanisms to
4827 * synchronize EH with regular execution path.
4828 *
4829 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4830 * Normal execution path is responsible for not accessing a
4831 * failed qc. libata core enforces the rule by returning NULL
4832 * from ata_qc_from_tag() for failed qcs.
4833 *
4834 * Old EH depends on ata_qc_complete() nullifying completion
4835 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4836 * not synchronize with interrupt handler. Only PIO task is
4837 * taken care of.
4838 */
4839 if (ap->ops->error_handler) {
4dbfa39b
TH
4840 struct ata_device *dev = qc->dev;
4841 struct ata_eh_info *ehi = &dev->link->eh_info;
4842
f686bcb8
TH
4843 if (unlikely(qc->err_mask))
4844 qc->flags |= ATA_QCFLAG_FAILED;
4845
f08dc1ac
TH
4846 /*
4847 * Finish internal commands without any further processing
4848 * and always with the result TF filled.
4849 */
4850 if (unlikely(ata_tag_internal(qc->tag))) {
f4b31db9 4851 fill_result_tf(qc);
f08dc1ac
TH
4852 __ata_qc_complete(qc);
4853 return;
4854 }
f4b31db9 4855
f08dc1ac
TH
4856 /*
4857 * Non-internal qc has failed. Fill the result TF and
4858 * summon EH.
4859 */
4860 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4861 fill_result_tf(qc);
4862 ata_qc_schedule_eh(qc);
f4b31db9 4863 return;
f686bcb8
TH
4864 }
4865
4dc738ed
TH
4866 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4867
f686bcb8
TH
4868 /* read result TF if requested */
4869 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4870 fill_result_tf(qc);
f686bcb8 4871
4dbfa39b
TH
4872 /* Some commands need post-processing after successful
4873 * completion.
4874 */
4875 switch (qc->tf.command) {
4876 case ATA_CMD_SET_FEATURES:
4877 if (qc->tf.feature != SETFEATURES_WC_ON &&
4878 qc->tf.feature != SETFEATURES_WC_OFF)
4879 break;
4880 /* fall through */
4881 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4882 case ATA_CMD_SET_MULTI: /* multi_count changed */
4883 /* revalidate device */
4884 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4885 ata_port_schedule_eh(ap);
4886 break;
054a5fba
TH
4887
4888 case ATA_CMD_SLEEP:
4889 dev->flags |= ATA_DFLAG_SLEEPING;
4890 break;
4dbfa39b
TH
4891 }
4892
00115e0f
TH
4893 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4894 ata_verify_xfer(qc);
4895
f686bcb8
TH
4896 __ata_qc_complete(qc);
4897 } else {
4898 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4899 return;
4900
4901 /* read result TF if failed or requested */
4902 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4903 fill_result_tf(qc);
f686bcb8
TH
4904
4905 __ata_qc_complete(qc);
4906 }
4907}
4908
dedaf2b0
TH
4909/**
4910 * ata_qc_complete_multiple - Complete multiple qcs successfully
4911 * @ap: port in question
4912 * @qc_active: new qc_active mask
dedaf2b0
TH
4913 *
4914 * Complete in-flight commands. This function is meant to be
4915 * called from low-level driver's interrupt routine to complete
4916 * requests normally. ap->qc_active and @qc_active are compared
4917 * and commands are completed accordingly.
4918 *
1aadf5c3
TH
4919 * Always use this function when completing multiple NCQ commands
4920 * from IRQ handlers instead of calling ata_qc_complete()
4921 * multiple times to keep IRQ expect status properly in sync.
4922 *
dedaf2b0 4923 * LOCKING:
cca3974e 4924 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4925 *
4926 * RETURNS:
4927 * Number of completed commands on success, -errno otherwise.
4928 */
79f97dad 4929int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
dedaf2b0
TH
4930{
4931 int nr_done = 0;
4932 u32 done_mask;
dedaf2b0
TH
4933
4934 done_mask = ap->qc_active ^ qc_active;
4935
4936 if (unlikely(done_mask & qc_active)) {
a9a79dfe
JP
4937 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4938 ap->qc_active, qc_active);
dedaf2b0
TH
4939 return -EINVAL;
4940 }
4941
43768180 4942 while (done_mask) {
dedaf2b0 4943 struct ata_queued_cmd *qc;
43768180 4944 unsigned int tag = __ffs(done_mask);
dedaf2b0 4945
43768180
JA
4946 qc = ata_qc_from_tag(ap, tag);
4947 if (qc) {
dedaf2b0
TH
4948 ata_qc_complete(qc);
4949 nr_done++;
4950 }
43768180 4951 done_mask &= ~(1 << tag);
dedaf2b0
TH
4952 }
4953
4954 return nr_done;
4955}
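/*
 * A minimal sketch of a hypothetical LLD interrupt path: the handler
 * derives the still-active tag mask from its hardware and passes it in
 * directly, e.g.
 *
 *	u32 qc_active = my_read_hw_active_tags(ap);	(hypothetical helper)
 *	ata_qc_complete_multiple(ap, qc_active);
 *
 * Every tag set in ap->qc_active but clear in @qc_active is then
 * completed in one pass, keeping IRQ expect state in sync.
 */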
4956
1da177e4
LT
4957/**
4958 * ata_qc_issue - issue taskfile to device
4959 * @qc: command to issue to device
4960 *
4961 * Prepare an ATA command for submission to the device.
4962 * This includes mapping the data into a DMA-able
4963 * area, filling in the S/G table, and finally
4964 * writing the taskfile to hardware, starting the command.
4965 *
4966 * LOCKING:
cca3974e 4967 * spin_lock_irqsave(host lock)
1da177e4 4968 */
8e0e694a 4969void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4970{
4971 struct ata_port *ap = qc->ap;
9af5c9c9 4972 struct ata_link *link = qc->dev->link;
405e66b3 4973 u8 prot = qc->tf.protocol;
1da177e4 4974
dedaf2b0
TH
4975 /* Make sure only one non-NCQ command is outstanding. The
4976 * check is skipped for old EH because it reuses active qc to
4977 * request ATAPI sense.
4978 */
efcb3cf7 4979 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 4980
1973a023 4981 if (ata_is_ncq(prot)) {
efcb3cf7 4982 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
da917d69
TH
4983
4984 if (!link->sactive)
4985 ap->nr_active_links++;
9af5c9c9 4986 link->sactive |= 1 << qc->tag;
dedaf2b0 4987 } else {
efcb3cf7 4988 WARN_ON_ONCE(link->sactive);
da917d69
TH
4989
4990 ap->nr_active_links++;
9af5c9c9 4991 link->active_tag = qc->tag;
dedaf2b0
TH
4992 }
4993
e4a70e76 4994 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4995 ap->qc_active |= 1 << qc->tag;
e4a70e76 4996
60f5d6ef
TH
4997 /*
4998 * We guarantee to LLDs that they will have at least one
f92a2636
TH
4999 * non-zero sg if the command is a data command.
5000 */
60f5d6ef
TH
5001 if (WARN_ON_ONCE(ata_is_data(prot) &&
5002 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5003 goto sys_err;
f92a2636 5004
405e66b3 5005 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 5006 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7 5007 if (ata_sg_setup(qc))
60f5d6ef 5008 goto sys_err;
1da177e4 5009
cf480626 5010 /* if device is sleeping, schedule reset and abort the link */
054a5fba 5011 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
cf480626 5012 link->eh_info.action |= ATA_EH_RESET;
054a5fba
TH
5013 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5014 ata_link_abort(link);
5015 return;
5016 }
5017
1da177e4
LT
5018 ap->ops->qc_prep(qc);
5019
8e0e694a
TH
5020 qc->err_mask |= ap->ops->qc_issue(qc);
5021 if (unlikely(qc->err_mask))
5022 goto err;
5023 return;
1da177e4 5024
60f5d6ef 5025sys_err:
8e0e694a
TH
5026 qc->err_mask |= AC_ERR_SYSTEM;
5027err:
5028 ata_qc_complete(qc);
1da177e4
LT
5029}
5030
34bf2170
TH
5031/**
5032 * sata_scr_valid - test whether SCRs are accessible
936fd732 5033 * @link: ATA link to test SCR accessibility for
34bf2170 5034 *
936fd732 5035 * Test whether SCRs are accessible for @link.
34bf2170
TH
5036 *
5037 * LOCKING:
5038 * None.
5039 *
5040 * RETURNS:
5041 * 1 if SCRs are accessible, 0 otherwise.
5042 */
936fd732 5043int sata_scr_valid(struct ata_link *link)
34bf2170 5044{
936fd732
TH
5045 struct ata_port *ap = link->ap;
5046
a16abc0b 5047 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5048}
5049
5050/**
5051 * sata_scr_read - read SCR register of the specified port
936fd732 5052 * @link: ATA link to read SCR for
34bf2170
TH
5053 * @reg: SCR to read
5054 * @val: Place to store read value
5055 *
936fd732 5056 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
5057 * guaranteed to succeed if @link is ap->link, the cable type of
5058 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5059 *
5060 * LOCKING:
633273a3 5061 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5062 *
5063 * RETURNS:
5064 * 0 on success, negative errno on failure.
5065 */
936fd732 5066int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5067{
633273a3 5068 if (ata_is_host_link(link)) {
633273a3 5069 if (sata_scr_valid(link))
82ef04fb 5070 return link->ap->ops->scr_read(link, reg, val);
633273a3
TH
5071 return -EOPNOTSUPP;
5072 }
5073
5074 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
5075}
5076
5077/**
5078 * sata_scr_write - write SCR register of the specified port
936fd732 5079 * @link: ATA link to write SCR for
34bf2170
TH
5080 * @reg: SCR to write
5081 * @val: value to write
5082 *
936fd732 5083 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
5084 * guaranteed to succeed if @link is ap->link, the cable type of
5085 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5086 *
5087 * LOCKING:
633273a3 5088 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5089 *
5090 * RETURNS:
5091 * 0 on success, negative errno on failure.
5092 */
936fd732 5093int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5094{
633273a3 5095 if (ata_is_host_link(link)) {
633273a3 5096 if (sata_scr_valid(link))
82ef04fb 5097 return link->ap->ops->scr_write(link, reg, val);
633273a3
TH
5098 return -EOPNOTSUPP;
5099 }
936fd732 5100
633273a3 5101 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5102}
5103
5104/**
5105 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5106 * @link: ATA link to write SCR for
34bf2170
TH
5107 * @reg: SCR to write
5108 * @val: value to write
5109 *
5110 * This function is identical to sata_scr_write() except that this
5111 * function performs flush after writing to the register.
5112 *
5113 * LOCKING:
633273a3 5114 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5115 *
5116 * RETURNS:
5117 * 0 on success, negative errno on failure.
5118 */
936fd732 5119int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5120{
633273a3 5121 if (ata_is_host_link(link)) {
633273a3 5122 int rc;
da3dbb17 5123
633273a3 5124 if (sata_scr_valid(link)) {
82ef04fb 5125 rc = link->ap->ops->scr_write(link, reg, val);
633273a3 5126 if (rc == 0)
82ef04fb 5127 rc = link->ap->ops->scr_read(link, reg, &val);
633273a3
TH
5128 return rc;
5129 }
5130 return -EOPNOTSUPP;
34bf2170 5131 }
633273a3
TH
5132
5133 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5134}
5135
5136/**
b1c72916 5137 * ata_phys_link_online - test whether the given link is online
936fd732 5138 * @link: ATA link to test
34bf2170 5139 *
936fd732
TH
5140 * Test whether @link is online. Note that this function returns
5141 * 0 if online status of @link cannot be obtained, so
5142 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5143 *
5144 * LOCKING:
5145 * None.
5146 *
5147 * RETURNS:
b5b3fa38 5148 * True if the port online status is available and online.
34bf2170 5149 */
b1c72916 5150bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
5151{
5152 u32 sstatus;
5153
936fd732 5154 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5155 ata_sstatus_online(sstatus))
b5b3fa38
TH
5156 return true;
5157 return false;
34bf2170
TH
5158}
5159
5160/**
b1c72916 5161 * ata_phys_link_offline - test whether the given link is offline
936fd732 5162 * @link: ATA link to test
34bf2170 5163 *
936fd732
TH
5164 * Test whether @link is offline. Note that this function
5165 * returns 0 if offline status of @link cannot be obtained, so
5166 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5167 *
5168 * LOCKING:
5169 * None.
5170 *
5171 * RETURNS:
b5b3fa38 5172 * True if the port offline status is available and offline.
34bf2170 5173 */
b1c72916 5174bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
5175{
5176 u32 sstatus;
5177
936fd732 5178 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5179 !ata_sstatus_online(sstatus))
b5b3fa38
TH
5180 return true;
5181 return false;
34bf2170 5182}
0baab86b 5183
b1c72916
TH
5184/**
5185 * ata_link_online - test whether the given link is online
5186 * @link: ATA link to test
5187 *
5188 * Test whether @link is online. This is identical to
5189 * ata_phys_link_online() when there's no slave link. When
5190 * there's a slave link, this function should only be called on
5191 * the master link and will return true if any of M/S links is
5192 * online.
5193 *
5194 * LOCKING:
5195 * None.
5196 *
5197 * RETURNS:
5198 * True if the port online status is available and online.
5199 */
5200bool ata_link_online(struct ata_link *link)
5201{
5202 struct ata_link *slave = link->ap->slave_link;
5203
5204 WARN_ON(link == slave); /* shouldn't be called on slave link */
5205
5206 return ata_phys_link_online(link) ||
5207 (slave && ata_phys_link_online(slave));
5208}
5209
5210/**
5211 * ata_link_offline - test whether the given link is offline
5212 * @link: ATA link to test
5213 *
5214 * Test whether @link is offline. This is identical to
5215 * ata_phys_link_offline() when there's no slave link. When
5216 * there's a slave link, this function should only be called on
5217 * the master link and will return true if both M/S links are
5218 * offline.
5219 *
5220 * LOCKING:
5221 * None.
5222 *
5223 * RETURNS:
5224 * True if the port offline status is available and offline.
5225 */
5226bool ata_link_offline(struct ata_link *link)
5227{
5228 struct ata_link *slave = link->ap->slave_link;
5229
5230 WARN_ON(link == slave); /* shouldn't be called on slave link */
5231
5232 return ata_phys_link_offline(link) &&
5233 (!slave || ata_phys_link_offline(slave));
5234}
5235
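An illustrative sketch of how the online/offline tests are typically consumed after a reset; example_classify_after_reset() and its return convention are hypothetical, ata_link_offline() and the ATA_DEV_* constants are real.

static unsigned int example_classify_after_reset(struct ata_link *link)
{
	/* offline, or status could not be obtained: report nothing attached */
	if (ata_link_offline(link))
		return ATA_DEV_NONE;

	/* otherwise the caller would read the device signature and classify */
	return ATA_DEV_UNKNOWN;
}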
6ffa01d8 5236#ifdef CONFIG_PM
cca3974e
JG
5237static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5238 unsigned int action, unsigned int ehi_flags,
5239 int wait)
500530f6
TH
5240{
5241 unsigned long flags;
5242 int i, rc;
5243
cca3974e
JG
5244 for (i = 0; i < host->n_ports; i++) {
5245 struct ata_port *ap = host->ports[i];
e3667ebf 5246 struct ata_link *link;
500530f6
TH
5247
5248 /* Previous resume operation might still be in
5249 * progress. Wait for PM_PENDING to clear.
5250 */
5251 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5252 ata_port_wait_eh(ap);
5253 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5254 }
5255
5256 /* request PM ops to EH */
5257 spin_lock_irqsave(ap->lock, flags);
5258
5259 ap->pm_mesg = mesg;
5260 if (wait) {
5261 rc = 0;
5262 ap->pm_result = &rc;
5263 }
5264
5265 ap->pflags |= ATA_PFLAG_PM_PENDING;
1eca4365 5266 ata_for_each_link(link, ap, HOST_FIRST) {
e3667ebf
TH
5267 link->eh_info.action |= action;
5268 link->eh_info.flags |= ehi_flags;
5269 }
500530f6
TH
5270
5271 ata_port_schedule_eh(ap);
5272
5273 spin_unlock_irqrestore(ap->lock, flags);
5274
5275 /* wait and check result */
5276 if (wait) {
5277 ata_port_wait_eh(ap);
5278 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5279 if (rc)
5280 return rc;
5281 }
5282 }
5283
5284 return 0;
5285}
5286
5287/**
cca3974e
JG
5288 * ata_host_suspend - suspend host
5289 * @host: host to suspend
500530f6
TH
5290 * @mesg: PM message
5291 *
cca3974e 5292 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5293 * function requests EH to perform PM operations and waits for EH
5294 * to finish.
5295 *
5296 * LOCKING:
5297 * Kernel thread context (may sleep).
5298 *
5299 * RETURNS:
5300 * 0 on success, -errno on failure.
5301 */
cca3974e 5302int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5303{
e2f3d75f 5304 unsigned int ehi_flags = ATA_EHI_QUIET;
9666f400 5305 int rc;
500530f6 5306
e2f3d75f
TH
5307 /*
5308 * On some hardware, the device fails to respond after being spun down
5309 * for suspend. As the device won't be used before being
5310 * resumed, we don't need to touch the device. Ask EH to skip
5311 * the usual stuff and proceed directly to suspend.
5312 *
5313 * http://thread.gmane.org/gmane.linux.ide/46764
5314 */
5315 if (mesg.event == PM_EVENT_SUSPEND)
5316 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5317
5318 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
72ad6ec4
JG
5319 if (rc == 0)
5320 host->dev->power.power_state = mesg;
500530f6
TH
5321 return rc;
5322}
5323
5324/**
cca3974e
JG
5325 * ata_host_resume - resume host
5326 * @host: host to resume
500530f6 5327 *
cca3974e 5328 * Resume @host. Actual operation is performed by EH. This
500530f6 5329 * function requests EH to perform PM operations and returns.
25985edc 5330 * Note that all resume operations are performed in parallel.
500530f6
TH
5331 *
5332 * LOCKING:
5333 * Kernel thread context (may sleep).
5334 */
cca3974e 5335void ata_host_resume(struct ata_host *host)
500530f6 5336{
cf480626 5337 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
cca3974e 5338 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 5339 host->dev->power.power_state = PMSG_ON;
500530f6 5340}
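A sketch of how an LLD's own power-management hooks would wrap these two helpers; the example_lld_* names are placeholders, only the ata_host_suspend()/ata_host_resume() calls and their blocking behavior come from the comments above.

static int example_lld_suspend(struct ata_host *host, pm_message_t mesg)
{
	/* EH performs the actual suspend; this call waits for it to finish */
	return ata_host_suspend(host, mesg);
}

static void example_lld_resume(struct ata_host *host)
{
	/* schedules resume through EH and returns without waiting */
	ata_host_resume(host);
}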
6ffa01d8 5341#endif
500530f6 5342
3ef3b43d
TH
5343/**
5344 * ata_dev_init - Initialize an ata_device structure
5345 * @dev: Device structure to initialize
5346 *
5347 * Initialize @dev in preparation for probing.
5348 *
5349 * LOCKING:
5350 * Inherited from caller.
5351 */
5352void ata_dev_init(struct ata_device *dev)
5353{
b1c72916 5354 struct ata_link *link = ata_dev_phys_link(dev);
9af5c9c9 5355 struct ata_port *ap = link->ap;
72fa4b74
TH
5356 unsigned long flags;
5357
b1c72916 5358 /* SATA spd limit is bound to the attached device, reset together */
9af5c9c9
TH
5359 link->sata_spd_limit = link->hw_sata_spd_limit;
5360 link->sata_spd = 0;
5a04bf4b 5361
72fa4b74
TH
5362 /* High bits of dev->flags are used to record warm plug
5363 * requests which occur asynchronously. Synchronize using
cca3974e 5364 * host lock.
72fa4b74 5365 */
ba6a1308 5366 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5367 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 5368 dev->horkage = 0;
ba6a1308 5369 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5370
99cf610a
TH
5371 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5372 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
3ef3b43d
TH
5373 dev->pio_mask = UINT_MAX;
5374 dev->mwdma_mask = UINT_MAX;
5375 dev->udma_mask = UINT_MAX;
5376}
5377
4fb37a25
TH
5378/**
5379 * ata_link_init - Initialize an ata_link structure
5380 * @ap: ATA port link is attached to
5381 * @link: Link structure to initialize
8989805d 5382 * @pmp: Port multiplier port number
4fb37a25
TH
5383 *
5384 * Initialize @link.
5385 *
5386 * LOCKING:
5387 * Kernel thread context (may sleep)
5388 */
fb7fd614 5389void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
5390{
5391 int i;
5392
5393 /* clear everything except for devices */
d9027470
GG
5394 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5395 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
4fb37a25
TH
5396
5397 link->ap = ap;
8989805d 5398 link->pmp = pmp;
4fb37a25
TH
5399 link->active_tag = ATA_TAG_POISON;
5400 link->hw_sata_spd_limit = UINT_MAX;
5401
5402 /* can't use iterator, ap isn't initialized yet */
5403 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5404 struct ata_device *dev = &link->device[i];
5405
5406 dev->link = link;
5407 dev->devno = dev - link->device;
110f66d2
TH
5408#ifdef CONFIG_ATA_ACPI
5409 dev->gtf_filter = ata_acpi_gtf_filter;
5410#endif
4fb37a25
TH
5411 ata_dev_init(dev);
5412 }
5413}
5414
5415/**
5416 * sata_link_init_spd - Initialize link->sata_spd_limit
5417 * @link: Link to configure sata_spd_limit for
5418 *
5419 * Initialize @link->[hw_]sata_spd_limit to the currently
5420 * configured value.
5421 *
5422 * LOCKING:
5423 * Kernel thread context (may sleep).
5424 *
5425 * RETURNS:
5426 * 0 on success, -errno on failure.
5427 */
fb7fd614 5428int sata_link_init_spd(struct ata_link *link)
4fb37a25 5429{
33267325 5430 u8 spd;
4fb37a25
TH
5431 int rc;
5432
d127ea7b 5433 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
4fb37a25
TH
5434 if (rc)
5435 return rc;
5436
d127ea7b 5437 spd = (link->saved_scontrol >> 4) & 0xf;
4fb37a25
TH
5438 if (spd)
5439 link->hw_sata_spd_limit &= (1 << spd) - 1;
5440
05944bdf 5441 ata_force_link_limits(link);
33267325 5442
4fb37a25
TH
5443 link->sata_spd_limit = link->hw_sata_spd_limit;
5444
5445 return 0;
5446}
5447
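A worked restatement of the limit math above, as a hypothetical helper: SControl.SPD == 1 (cap at 1.5Gbps) yields mask 0x1, SPD == 2 (cap at 3.0Gbps) yields 0x3, and SPD == 0 means no restriction.

static inline u32 example_spd_to_mask(u8 spd)
{
	/* same (1 << spd) - 1 computation used by sata_link_init_spd() */
	return spd ? (1U << spd) - 1 : ~0U;
}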
1da177e4 5448/**
f3187195
TH
5449 * ata_port_alloc - allocate and initialize basic ATA port resources
5450 * @host: ATA host this allocated port belongs to
1da177e4 5451 *
f3187195
TH
5452 * Allocate and initialize basic ATA port resources.
5453 *
5454 * RETURNS:
5455 * Allocated ATA port on success, NULL on failure.
0cba632b 5456 *
1da177e4 5457 * LOCKING:
f3187195 5458 * Inherited from calling layer (may sleep).
1da177e4 5459 */
f3187195 5460struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5461{
f3187195 5462 struct ata_port *ap;
1da177e4 5463
f3187195
TH
5464 DPRINTK("ENTER\n");
5465
5466 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5467 if (!ap)
5468 return NULL;
4fca377f 5469
7b3a24c5 5470 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
cca3974e 5471 ap->lock = &host->lock;
f3187195 5472 ap->print_id = -1;
cca3974e 5473 ap->host = host;
f3187195 5474 ap->dev = host->dev;
bd5d825c
BP
5475
5476#if defined(ATA_VERBOSE_DEBUG)
5477 /* turn on all debugging levels */
5478 ap->msg_enable = 0x00FF;
5479#elif defined(ATA_DEBUG)
5480 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5481#else
0dd4b21f 5482 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5483#endif
1da177e4 5484
ad72cf98 5485 mutex_init(&ap->scsi_scan_mutex);
65f27f38
DH
5486 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5487 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5488 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5489 init_waitqueue_head(&ap->eh_wait_q);
45fabbb7 5490 init_completion(&ap->park_req_pending);
5ddf24c5
TH
5491 init_timer_deferrable(&ap->fastdrain_timer);
5492 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5493 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 5494
838df628 5495 ap->cbl = ATA_CBL_NONE;
838df628 5496
8989805d 5497 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
5498
5499#ifdef ATA_IRQ_TRAP
5500 ap->stats.unhandled_irq = 1;
5501 ap->stats.idle_irq = 1;
5502#endif
270390e1
TH
5503 ata_sff_port_init(ap);
5504
1da177e4 5505 return ap;
1da177e4
LT
5506}
5507
f0d36efd
TH
5508static void ata_host_release(struct device *gendev, void *res)
5509{
5510 struct ata_host *host = dev_get_drvdata(gendev);
5511 int i;
5512
1aa506e4
TH
5513 for (i = 0; i < host->n_ports; i++) {
5514 struct ata_port *ap = host->ports[i];
5515
4911487a
TH
5516 if (!ap)
5517 continue;
5518
5519 if (ap->scsi_host)
1aa506e4
TH
5520 scsi_host_put(ap->scsi_host);
5521
633273a3 5522 kfree(ap->pmp_link);
b1c72916 5523 kfree(ap->slave_link);
4911487a 5524 kfree(ap);
1aa506e4
TH
5525 host->ports[i] = NULL;
5526 }
5527
1aa56cca 5528 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5529}
5530
f3187195
TH
5531/**
5532 * ata_host_alloc - allocate and init basic ATA host resources
5533 * @dev: generic device this host is associated with
5534 * @max_ports: maximum number of ATA ports associated with this host
5535 *
5536 * Allocate and initialize basic ATA host resources. An LLD calls
5537 * this function to allocate a host, fully initializes it and then
5538 * attaches it using ata_host_register().
5539 *
5540 * @max_ports ports are allocated and host->n_ports is
5541 * initialized to @max_ports. The caller is allowed to decrease
5542 * host->n_ports before calling ata_host_register(). The unused
5543 * ports will be automatically freed on registration.
5544 *
5545 * RETURNS:
5546 * Allocated ATA host on success, NULL on failure.
5547 *
5548 * LOCKING:
5549 * Inherited from calling layer (may sleep).
5550 */
5551struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5552{
5553 struct ata_host *host;
5554 size_t sz;
5555 int i;
5556
5557 DPRINTK("ENTER\n");
5558
5559 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5560 return NULL;
5561
5562 /* alloc a container for our list of ATA ports (buses) */
5563 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5564 /* allocate the host via devres so ata_host_release() runs on driver detach */
5565 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5566 if (!host)
5567 goto err_out;
5568
5569 devres_add(dev, host);
5570 dev_set_drvdata(dev, host);
5571
5572 spin_lock_init(&host->lock);
c0c362b6 5573 mutex_init(&host->eh_mutex);
f3187195
TH
5574 host->dev = dev;
5575 host->n_ports = max_ports;
5576
5577 /* allocate ports bound to this host */
5578 for (i = 0; i < max_ports; i++) {
5579 struct ata_port *ap;
5580
5581 ap = ata_port_alloc(host);
5582 if (!ap)
5583 goto err_out;
5584
5585 ap->port_no = i;
5586 host->ports[i] = ap;
5587 }
5588
5589 devres_remove_group(dev, NULL);
5590 return host;
5591
5592 err_out:
5593 devres_release_group(dev, NULL);
5594 return NULL;
5595}
5596
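A minimal allocation sketch for an LLD probe path; the worst-case count of 4 and the trimming condition are made up, while the rule that n_ports may be reduced before ata_host_register() comes from the comment above.

static struct ata_host *example_alloc(struct device *dev, int found_ports)
{
	struct ata_host *host;

	host = ata_host_alloc(dev, 4);		/* allocate for the worst case */
	if (!host)
		return NULL;

	/* fewer ports actually present: trim before registration */
	if (found_ports < host->n_ports)
		host->n_ports = found_ports;

	return host;
}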
f5cda257
TH
5597/**
5598 * ata_host_alloc_pinfo - alloc host and init with port_info array
5599 * @dev: generic device this host is associated with
5600 * @ppi: array of ATA port_info to initialize host with
5601 * @n_ports: number of ATA ports attached to this host
5602 *
5603 * Allocate ATA host and initialize with info from @ppi. If NULL
5604 * terminated, @ppi may contain fewer entries than @n_ports. The
5605 * last entry will be used for the remaining ports.
5606 *
5607 * RETURNS:
5608 * Allocated ATA host on success, NULL on failure.
5609 *
5610 * LOCKING:
5611 * Inherited from calling layer (may sleep).
5612 */
5613struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5614 const struct ata_port_info * const * ppi,
5615 int n_ports)
5616{
5617 const struct ata_port_info *pi;
5618 struct ata_host *host;
5619 int i, j;
5620
5621 host = ata_host_alloc(dev, n_ports);
5622 if (!host)
5623 return NULL;
5624
5625 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5626 struct ata_port *ap = host->ports[i];
5627
5628 if (ppi[j])
5629 pi = ppi[j++];
5630
5631 ap->pio_mask = pi->pio_mask;
5632 ap->mwdma_mask = pi->mwdma_mask;
5633 ap->udma_mask = pi->udma_mask;
5634 ap->flags |= pi->flags;
0c88758b 5635 ap->link.flags |= pi->link_flags;
f5cda257
TH
5636 ap->ops = pi->port_ops;
5637
5638 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5639 host->ops = pi->port_ops;
f5cda257
TH
5640 }
5641
5642 return host;
5643}
5644
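A sketch of the usual @ppi idiom: a NULL-terminated array shorter than @n_ports, so the last real entry covers the remaining ports. The port_info values are placeholders; ata_host_alloc_pinfo() and sata_port_ops are real.

static const struct ata_port_info example_pi = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &sata_port_ops,
};

static struct ata_host *example_alloc_pinfo(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &example_pi, NULL };

	/* both ports get example_pi because the array is NULL terminated */
	return ata_host_alloc_pinfo(dev, ppi, 2);
}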
b1c72916
TH
5645/**
5646 * ata_slave_link_init - initialize slave link
5647 * @ap: port to initialize slave link for
5648 *
5649 * Create and initialize slave link for @ap. This enables slave
5650 * link handling on the port.
5651 *
5652 * In libata, a port contains links and a link contains devices.
5653 * There is a single host link, but if a PMP is attached to it,
5654 * there can be multiple fan-out links. On SATA, there's usually
5655 * a single device connected to a link but PATA and SATA
5656 * controllers emulating TF based interface can have two - master
5657 * and slave.
5658 *
5659 * However, there are a few controllers which don't fit into this
5660 * abstraction too well - SATA controllers which emulate TF
5661 * interface with both master and slave devices but also have
5662 * separate SCR register sets for each device. These controllers
5663 * need separate links for physical link handling
5664 * (e.g. onlineness, link speed) but should be treated like a
5665 * traditional M/S controller for everything else (e.g. command
5666 * issue, softreset).
5667 *
5668 * slave_link is libata's way of handling this class of
5669 * controllers without impacting core layer too much. For
5670 * anything other than physical link handling, the default host
5671 * link is used for both master and slave. For physical link
5672 * handling, separate @ap->slave_link is used. All dirty details
5673 * are implemented inside libata core layer. From LLD's POV, the
5674 * only difference is that prereset, hardreset and postreset are
5675 * called once more for the slave link, so the reset sequence
5676 * looks like the following.
5677 *
5678 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5679 * softreset(M) -> postreset(M) -> postreset(S)
5680 *
5681 * Note that softreset is called only for the master. Softreset
5682 * resets both M/S by definition, so SRST on master should handle
5683 * both (the standard method will work just fine).
5684 *
5685 * LOCKING:
5686 * Should be called before host is registered.
5687 *
5688 * RETURNS:
5689 * 0 on success, -errno on failure.
5690 */
5691int ata_slave_link_init(struct ata_port *ap)
5692{
5693 struct ata_link *link;
5694
5695 WARN_ON(ap->slave_link);
5696 WARN_ON(ap->flags & ATA_FLAG_PMP);
5697
5698 link = kzalloc(sizeof(*link), GFP_KERNEL);
5699 if (!link)
5700 return -ENOMEM;
5701
5702 ata_link_init(ap, link, 1);
5703 ap->slave_link = link;
5704 return 0;
5705}
5706
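An illustrative call site: a controller with per-device SCRs sets up the slave link once, after allocating the port and before host registration. The wrapper name is hypothetical.

static int example_enable_slave_link(struct ata_port *ap)
{
	int rc;

	rc = ata_slave_link_init(ap);
	if (rc)
		return rc;

	/* EH will now also run prereset/hardreset/postreset on ap->slave_link */
	return 0;
}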
32ebbc0c
TH
5707static void ata_host_stop(struct device *gendev, void *res)
5708{
5709 struct ata_host *host = dev_get_drvdata(gendev);
5710 int i;
5711
5712 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5713
5714 for (i = 0; i < host->n_ports; i++) {
5715 struct ata_port *ap = host->ports[i];
5716
5717 if (ap->ops->port_stop)
5718 ap->ops->port_stop(ap);
5719 }
5720
5721 if (host->ops->host_stop)
5722 host->ops->host_stop(host);
5723}
5724
029cfd6b
TH
5725/**
5726 * ata_finalize_port_ops - finalize ata_port_operations
5727 * @ops: ata_port_operations to finalize
5728 *
5729 * An ata_port_operations can inherit from another ops and that
5730 * ops can again inherit from another. This can go on as many
5731 * times as necessary as long as there is no loop in the
5732 * inheritance chain.
5733 *
5734 * Ops tables are finalized when the host is started. NULL or
5735 * unspecified entries are inherited from the closest ancestor
5736 * which has the method and the entry is populated with it.
5737 * After finalization, the ops table directly points to all the
5738 * methods and ->inherits is no longer necessary and cleared.
5739 *
5740 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5741 *
5742 * LOCKING:
5743 * None.
5744 */
5745static void ata_finalize_port_ops(struct ata_port_operations *ops)
5746{
2da67659 5747 static DEFINE_SPINLOCK(lock);
029cfd6b
TH
5748 const struct ata_port_operations *cur;
5749 void **begin = (void **)ops;
5750 void **end = (void **)&ops->inherits;
5751 void **pp;
5752
5753 if (!ops || !ops->inherits)
5754 return;
5755
5756 spin_lock(&lock);
5757
5758 for (cur = ops->inherits; cur; cur = cur->inherits) {
5759 void **inherit = (void **)cur;
5760
5761 for (pp = begin; pp < end; pp++, inherit++)
5762 if (!*pp)
5763 *pp = *inherit;
5764 }
5765
5766 for (pp = begin; pp < end; pp++)
5767 if (IS_ERR(*pp))
5768 *pp = NULL;
5769
5770 ops->inherits = NULL;
5771
5772 spin_unlock(&lock);
5773}
5774
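A sketch of the inheritance this routine resolves: a hypothetical LLD ops table inheriting from sata_port_ops, overriding one method explicitly and forcing another to NULL via ATA_OP_NULL. After finalization every slot holds a concrete pointer (or NULL) and .inherits is cleared.

static struct ata_port_operations example_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= sata_std_hardreset,	/* explicit override */
	.softreset	= ATA_OP_NULL,		/* force "no softreset" */
};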
ecef7253
TH
5775/**
5776 * ata_host_start - start and freeze ports of an ATA host
5777 * @host: ATA host to start ports for
5778 *
5779 * Start and then freeze ports of @host. Started status is
5780 * recorded in host->flags, so this function can be called
5781 * multiple times. Ports are guaranteed to get started only
f3187195
TH
5782 * once. If host->ops isn't initialized yet, it's set to the
5783 * first non-dummy port ops.
ecef7253
TH
5784 *
5785 * LOCKING:
5786 * Inherited from calling layer (may sleep).
5787 *
5788 * RETURNS:
5789 * 0 if all ports are started successfully, -errno otherwise.
5790 */
5791int ata_host_start(struct ata_host *host)
5792{
32ebbc0c
TH
5793 int have_stop = 0;
5794 void *start_dr = NULL;
ecef7253
TH
5795 int i, rc;
5796
5797 if (host->flags & ATA_HOST_STARTED)
5798 return 0;
5799
029cfd6b
TH
5800 ata_finalize_port_ops(host->ops);
5801
ecef7253
TH
5802 for (i = 0; i < host->n_ports; i++) {
5803 struct ata_port *ap = host->ports[i];
5804
029cfd6b
TH
5805 ata_finalize_port_ops(ap->ops);
5806
f3187195
TH
5807 if (!host->ops && !ata_port_is_dummy(ap))
5808 host->ops = ap->ops;
5809
32ebbc0c
TH
5810 if (ap->ops->port_stop)
5811 have_stop = 1;
5812 }
5813
5814 if (host->ops->host_stop)
5815 have_stop = 1;
5816
5817 if (have_stop) {
5818 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5819 if (!start_dr)
5820 return -ENOMEM;
5821 }
5822
5823 for (i = 0; i < host->n_ports; i++) {
5824 struct ata_port *ap = host->ports[i];
5825
ecef7253
TH
5826 if (ap->ops->port_start) {
5827 rc = ap->ops->port_start(ap);
5828 if (rc) {
0f9fe9b7 5829 if (rc != -ENODEV)
a44fec1f
JP
5830 dev_err(host->dev,
5831 "failed to start port %d (errno=%d)\n",
5832 i, rc);
ecef7253
TH
5833 goto err_out;
5834 }
5835 }
ecef7253
TH
5836 ata_eh_freeze_port(ap);
5837 }
5838
32ebbc0c
TH
5839 if (start_dr)
5840 devres_add(host->dev, start_dr);
ecef7253
TH
5841 host->flags |= ATA_HOST_STARTED;
5842 return 0;
5843
5844 err_out:
5845 while (--i >= 0) {
5846 struct ata_port *ap = host->ports[i];
5847
5848 if (ap->ops->port_stop)
5849 ap->ops->port_stop(ap);
5850 }
32ebbc0c 5851 devres_free(start_dr);
ecef7253
TH
5852 return rc;
5853}
5854
b03732f0 5855/**
cca3974e
JG
5856 * ata_host_init - Initialize a host struct
5857 * @host: host to initialize
5858 * @dev: device host is attached to
5859 * @flags: host flags
5860 * @ops: port_ops
b03732f0
BK
5861 *
5862 * LOCKING:
5863 * PCI/etc. bus probe sem.
5864 *
5865 */
f3187195 5866/* KILLME - the only user left is ipr */
cca3974e 5867void ata_host_init(struct ata_host *host, struct device *dev,
029cfd6b 5868 unsigned long flags, struct ata_port_operations *ops)
b03732f0 5869{
cca3974e 5870 spin_lock_init(&host->lock);
c0c362b6 5871 mutex_init(&host->eh_mutex);
cca3974e
JG
5872 host->dev = dev;
5873 host->flags = flags;
5874 host->ops = ops;
b03732f0
BK
5875}
5876
238c9cf9 5877int ata_port_probe(struct ata_port *ap)
79318057 5878{
238c9cf9 5879 int rc = 0;
886ad09f 5880
79318057
AV
5881 /* probe */
5882 if (ap->ops->error_handler) {
5883 struct ata_eh_info *ehi = &ap->link.eh_info;
5884 unsigned long flags;
5885
79318057
AV
5886 /* kick EH for boot probing */
5887 spin_lock_irqsave(ap->lock, flags);
5888
5889 ehi->probe_mask |= ATA_ALL_DEVICES;
6b7ae954 5890 ehi->action |= ATA_EH_RESET;
79318057
AV
5891 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5892
5893 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5894 ap->pflags |= ATA_PFLAG_LOADING;
5895 ata_port_schedule_eh(ap);
5896
5897 spin_unlock_irqrestore(ap->lock, flags);
5898
5899 /* wait for EH to finish */
5900 ata_port_wait_eh(ap);
5901 } else {
5902 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5903 rc = ata_bus_probe(ap);
5904 DPRINTK("ata%u: bus probe end\n", ap->print_id);
79318057 5905 }
238c9cf9
JB
5906 return rc;
5907}
5908
5909
5910static void async_port_probe(void *data, async_cookie_t cookie)
5911{
5912 struct ata_port *ap = data;
4fca377f 5913
238c9cf9
JB
5914 /*
5915 * If we're not allowed to scan this host in parallel,
5916 * we need to wait until all previous scans have completed
5917 * before going further.
5918 * Jeff Garzik says this is only within a controller, so we
5919 * don't need to wait for port 0, only for later ports.
5920 */
5921 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5922 async_synchronize_cookie(cookie);
5923
5924 (void)ata_port_probe(ap);
f29d3b23
AV
5925
5926 /* in order to keep device order, we need to synchronize at this point */
5927 async_synchronize_cookie(cookie);
5928
5929 ata_scsi_scan_host(ap, 1);
79318057 5930}
238c9cf9 5931
f3187195
TH
5932/**
5933 * ata_host_register - register initialized ATA host
5934 * @host: ATA host to register
5935 * @sht: template for SCSI host
5936 *
5937 * Register initialized ATA host. @host is allocated using
5938 * ata_host_alloc() and fully initialized by LLD. This function
5939 * starts ports, registers @host with ATA and SCSI layers and
5940 * probe registered devices.
5941 *
5942 * LOCKING:
5943 * Inherited from calling layer (may sleep).
5944 *
5945 * RETURNS:
5946 * 0 on success, -errno otherwise.
5947 */
5948int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5949{
5950 int i, rc;
5951
5952 /* host must have been started */
5953 if (!(host->flags & ATA_HOST_STARTED)) {
a44fec1f 5954 dev_err(host->dev, "BUG: trying to register unstarted host\n");
f3187195
TH
5955 WARN_ON(1);
5956 return -EINVAL;
5957 }
5958
5959 /* Blow away unused ports. This happens when LLD can't
5960 * determine the exact number of ports to allocate at
5961 * allocation time.
5962 */
5963 for (i = host->n_ports; host->ports[i]; i++)
5964 kfree(host->ports[i]);
5965
5966 /* give ports names and add SCSI hosts */
5967 for (i = 0; i < host->n_ports; i++)
5968 host->ports[i]->print_id = ata_print_id++;
5969
4fca377f 5970
d9027470
GG
5971 /* Create associated sysfs transport objects */
5972 for (i = 0; i < host->n_ports; i++) {
5973 rc = ata_tport_add(host->dev,host->ports[i]);
5974 if (rc) {
5975 goto err_tadd;
5976 }
5977 }
5978
f3187195
TH
5979 rc = ata_scsi_add_hosts(host, sht);
5980 if (rc)
d9027470 5981 goto err_tadd;
f3187195 5982
fafbae87
TH
5983 /* associate with ACPI nodes */
5984 ata_acpi_associate(host);
5985
f3187195
TH
5986 /* set cable, sata_spd_limit and report */
5987 for (i = 0; i < host->n_ports; i++) {
5988 struct ata_port *ap = host->ports[i];
f3187195
TH
5989 unsigned long xfer_mask;
5990
5991 /* set SATA cable type if still unset */
5992 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5993 ap->cbl = ATA_CBL_SATA;
5994
5995 /* init sata_spd_limit to the current value */
4fb37a25 5996 sata_link_init_spd(&ap->link);
b1c72916
TH
5997 if (ap->slave_link)
5998 sata_link_init_spd(ap->slave_link);
f3187195 5999
cbcdd875 6000 /* print per-port info to dmesg */
f3187195
TH
6001 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6002 ap->udma_mask);
6003
abf6e8ed 6004 if (!ata_port_is_dummy(ap)) {
a9a79dfe
JP
6005 ata_port_info(ap, "%cATA max %s %s\n",
6006 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6007 ata_mode_string(xfer_mask),
6008 ap->link.eh_info.desc);
abf6e8ed
TH
6009 ata_ehi_clear_desc(&ap->link.eh_info);
6010 } else
a9a79dfe 6011 ata_port_info(ap, "DUMMY\n");
f3187195
TH
6012 }
6013
f6005354 6014 /* perform each probe asynchronously */
f3187195
TH
6015 for (i = 0; i < host->n_ports; i++) {
6016 struct ata_port *ap = host->ports[i];
79318057 6017 async_schedule(async_port_probe, ap);
f3187195 6018 }
f3187195
TH
6019
6020 return 0;
d9027470
GG
6021
6022 err_tadd:
6023 while (--i >= 0) {
6024 ata_tport_delete(host->ports[i]);
6025 }
6026 return rc;
6027
f3187195
TH
6028}
6029
f5cda257
TH
6030/**
6031 * ata_host_activate - start host, request IRQ and register it
6032 * @host: target ATA host
6033 * @irq: IRQ to request
6034 * @irq_handler: irq_handler used when requesting IRQ
6035 * @irq_flags: irq_flags used when requesting IRQ
6036 * @sht: scsi_host_template to use when registering the host
6037 *
6038 * After allocating an ATA host and initializing it, most libata
6039 * LLDs perform three steps to activate the host - start host,
6040 * request IRQ and register it. This helper takes necessasry
6041 * arguments and performs the three steps in one go.
6042 *
3d46b2e2
PM
6043 * An invalid IRQ skips the IRQ registration and expects the host to
6044 * have set polling mode on the port. In this case, @irq_handler
6045 * should be NULL.
6046 *
f5cda257
TH
6047 * LOCKING:
6048 * Inherited from calling layer (may sleep).
6049 *
6050 * RETURNS:
6051 * 0 on success, -errno otherwise.
6052 */
6053int ata_host_activate(struct ata_host *host, int irq,
6054 irq_handler_t irq_handler, unsigned long irq_flags,
6055 struct scsi_host_template *sht)
6056{
cbcdd875 6057 int i, rc;
f5cda257
TH
6058
6059 rc = ata_host_start(host);
6060 if (rc)
6061 return rc;
6062
3d46b2e2
PM
6063 /* Special case for polling mode */
6064 if (!irq) {
6065 WARN_ON(irq_handler);
6066 return ata_host_register(host, sht);
6067 }
6068
f5cda257
TH
6069 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6070 dev_driver_string(host->dev), host);
6071 if (rc)
6072 return rc;
6073
cbcdd875
TH
6074 for (i = 0; i < host->n_ports; i++)
6075 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6076
f5cda257
TH
6077 rc = ata_host_register(host, sht);
6078 /* if failed, just free the IRQ and leave ports alone */
6079 if (rc)
6080 devm_free_irq(host->dev, irq, host);
6081
6082 return rc;
6083}
6084
720ba126
TH
6085/**
6086 * ata_port_detach - Detach ATA port in preparation for device removal
6087 * @ap: ATA port to be detached
6088 *
6089 * Detach all ATA devices and the associated SCSI devices of @ap;
6090 * then, remove the associated SCSI host. @ap is guaranteed to
6091 * be quiescent on return from this function.
6092 *
6093 * LOCKING:
6094 * Kernel thread context (may sleep).
6095 */
741b7763 6096static void ata_port_detach(struct ata_port *ap)
720ba126
TH
6097{
6098 unsigned long flags;
720ba126
TH
6099
6100 if (!ap->ops->error_handler)
c3cf30a9 6101 goto skip_eh;
720ba126
TH
6102
6103 /* tell EH we're leaving & flush EH */
ba6a1308 6104 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6105 ap->pflags |= ATA_PFLAG_UNLOADING;
ece180d1 6106 ata_port_schedule_eh(ap);
ba6a1308 6107 spin_unlock_irqrestore(ap->lock, flags);
720ba126 6108
ece180d1 6109 /* wait till EH commits suicide */
720ba126
TH
6110 ata_port_wait_eh(ap);
6111
ece180d1
TH
6112 /* it better be dead now */
6113 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
720ba126 6114
afe2c511 6115 cancel_delayed_work_sync(&ap->hotplug_task);
720ba126 6116
c3cf30a9 6117 skip_eh:
d9027470
GG
6118 if (ap->pmp_link) {
6119 int i;
6120 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6121 ata_tlink_delete(&ap->pmp_link[i]);
6122 }
6123 ata_tport_delete(ap);
6124
720ba126 6125 /* remove the associated SCSI host */
cca3974e 6126 scsi_remove_host(ap->scsi_host);
720ba126
TH
6127}
6128
0529c159
TH
6129/**
6130 * ata_host_detach - Detach all ports of an ATA host
6131 * @host: Host to detach
6132 *
6133 * Detach all ports of @host.
6134 *
6135 * LOCKING:
6136 * Kernel thread context (may sleep).
6137 */
6138void ata_host_detach(struct ata_host *host)
6139{
6140 int i;
6141
6142 for (i = 0; i < host->n_ports; i++)
6143 ata_port_detach(host->ports[i]);
562f0c2d
TH
6144
6145 /* the host is dead now, dissociate ACPI */
6146 ata_acpi_dissociate(host);
0529c159
TH
6147}
6148
374b1873
JG
6149#ifdef CONFIG_PCI
6150
1da177e4
LT
6151/**
6152 * ata_pci_remove_one - PCI layer callback for device removal
6153 * @pdev: PCI device that was removed
6154 *
b878ca5d
TH
6155 * PCI layer indicates to libata via this hook that hot-unplug or
6156 * module unload event has occurred. Detach all ports. Resource
6157 * release is handled via devres.
1da177e4
LT
6158 *
6159 * LOCKING:
6160 * Inherited from PCI layer (may sleep).
6161 */
f0d36efd 6162void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 6163{
2855568b 6164 struct device *dev = &pdev->dev;
cca3974e 6165 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6166
b878ca5d 6167 ata_host_detach(host);
1da177e4
LT
6168}
6169
6170/* move to PCI subsystem */
057ace5e 6171int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6172{
6173 unsigned long tmp = 0;
6174
6175 switch (bits->width) {
6176 case 1: {
6177 u8 tmp8 = 0;
6178 pci_read_config_byte(pdev, bits->reg, &tmp8);
6179 tmp = tmp8;
6180 break;
6181 }
6182 case 2: {
6183 u16 tmp16 = 0;
6184 pci_read_config_word(pdev, bits->reg, &tmp16);
6185 tmp = tmp16;
6186 break;
6187 }
6188 case 4: {
6189 u32 tmp32 = 0;
6190 pci_read_config_dword(pdev, bits->reg, &tmp32);
6191 tmp = tmp32;
6192 break;
6193 }
6194
6195 default:
6196 return -EINVAL;
6197 }
6198
6199 tmp &= bits->mask;
6200
6201 return (tmp == bits->val) ? 1 : 0;
6202}
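A usage sketch loosely modelled on how legacy IDE LLDs probe port-enable bits in PCI config space; the register offset and mask are invented for illustration, struct pci_bits and pci_test_config_bits() are the real interface.

static int example_port_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bits = {
		.reg	= 0x41,		/* hypothetical config register */
		.width	= 1,		/* byte-wide access */
		.mask	= 0x80,
		.val	= 0x80,
	};

	/* 1 if (config[0x41] & 0x80) == 0x80, 0 otherwise, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &enable_bits);
}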
9b847548 6203
6ffa01d8 6204#ifdef CONFIG_PM
3c5100c1 6205void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6206{
6207 pci_save_state(pdev);
4c90d971 6208 pci_disable_device(pdev);
500530f6 6209
3a2d5b70 6210 if (mesg.event & PM_EVENT_SLEEP)
500530f6 6211 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6212}
6213
553c4aa6 6214int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6215{
553c4aa6
TH
6216 int rc;
6217
9b847548
JA
6218 pci_set_power_state(pdev, PCI_D0);
6219 pci_restore_state(pdev);
553c4aa6 6220
b878ca5d 6221 rc = pcim_enable_device(pdev);
553c4aa6 6222 if (rc) {
a44fec1f
JP
6223 dev_err(&pdev->dev,
6224 "failed to enable device after resume (%d)\n", rc);
553c4aa6
TH
6225 return rc;
6226 }
6227
9b847548 6228 pci_set_master(pdev);
553c4aa6 6229 return 0;
500530f6
TH
6230}
6231
3c5100c1 6232int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6233{
cca3974e 6234 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6235 int rc = 0;
6236
cca3974e 6237 rc = ata_host_suspend(host, mesg);
500530f6
TH
6238 if (rc)
6239 return rc;
6240
3c5100c1 6241 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6242
6243 return 0;
6244}
6245
6246int ata_pci_device_resume(struct pci_dev *pdev)
6247{
cca3974e 6248 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6249 int rc;
500530f6 6250
553c4aa6
TH
6251 rc = ata_pci_device_do_resume(pdev);
6252 if (rc == 0)
6253 ata_host_resume(host);
6254 return rc;
9b847548 6255}
6ffa01d8
TH
6256#endif /* CONFIG_PM */
6257
1da177e4
LT
6258#endif /* CONFIG_PCI */
6259
33267325
TH
6260static int __init ata_parse_force_one(char **cur,
6261 struct ata_force_ent *force_ent,
6262 const char **reason)
6263{
6264 /* FIXME: Currently, there's no way to tag init const data and
6265 * using __initdata causes build failure on some versions of
6266 * gcc. Once __initdataconst is implemented, add const to the
6267 * following structure.
6268 */
6269 static struct ata_force_param force_tbl[] __initdata = {
6270 { "40c", .cbl = ATA_CBL_PATA40 },
6271 { "80c", .cbl = ATA_CBL_PATA80 },
6272 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6273 { "unk", .cbl = ATA_CBL_PATA_UNK },
6274 { "ign", .cbl = ATA_CBL_PATA_IGN },
6275 { "sata", .cbl = ATA_CBL_SATA },
6276 { "1.5Gbps", .spd_limit = 1 },
6277 { "3.0Gbps", .spd_limit = 2 },
6278 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6279 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
43c9c591 6280 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
33267325
TH
6281 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6282 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6283 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6284 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6285 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6286 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6287 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6288 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6289 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6290 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6291 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6292 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6293 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6294 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6295 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6296 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6297 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6298 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6299 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6300 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6301 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6302 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6303 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6304 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6305 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6306 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6307 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6308 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6309 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6310 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6311 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6312 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6313 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6314 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
05944bdf
TH
6315 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6316 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6317 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
33267325
TH
6318 };
6319 char *start = *cur, *p = *cur;
6320 char *id, *val, *endp;
6321 const struct ata_force_param *match_fp = NULL;
6322 int nr_matches = 0, i;
6323
6324 /* find where this param ends and update *cur */
6325 while (*p != '\0' && *p != ',')
6326 p++;
6327
6328 if (*p == '\0')
6329 *cur = p;
6330 else
6331 *cur = p + 1;
6332
6333 *p = '\0';
6334
6335 /* parse */
6336 p = strchr(start, ':');
6337 if (!p) {
6338 val = strstrip(start);
6339 goto parse_val;
6340 }
6341 *p = '\0';
6342
6343 id = strstrip(start);
6344 val = strstrip(p + 1);
6345
6346 /* parse id */
6347 p = strchr(id, '.');
6348 if (p) {
6349 *p++ = '\0';
6350 force_ent->device = simple_strtoul(p, &endp, 10);
6351 if (p == endp || *endp != '\0') {
6352 *reason = "invalid device";
6353 return -EINVAL;
6354 }
6355 }
6356
6357 force_ent->port = simple_strtoul(id, &endp, 10);
6358 if (p == endp || *endp != '\0') {
6359 *reason = "invalid port/link";
6360 return -EINVAL;
6361 }
6362
6363 parse_val:
6364 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6365 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6366 const struct ata_force_param *fp = &force_tbl[i];
6367
6368 if (strncasecmp(val, fp->name, strlen(val)))
6369 continue;
6370
6371 nr_matches++;
6372 match_fp = fp;
6373
6374 if (strcasecmp(val, fp->name) == 0) {
6375 nr_matches = 1;
6376 break;
6377 }
6378 }
6379
6380 if (!nr_matches) {
6381 *reason = "unknown value";
6382 return -EINVAL;
6383 }
6384 if (nr_matches > 1) {
6385 *reason = "ambiguous value";
6386 return -EINVAL;
6387 }
6388
6389 force_ent->param = *match_fp;
6390
6391 return 0;
6392}
6393
6394static void __init ata_parse_force_param(void)
6395{
6396 int idx = 0, size = 1;
6397 int last_port = -1, last_device = -1;
6398 char *p, *cur, *next;
6399
6400 /* calculate maximum number of params and allocate force_tbl */
6401 for (p = ata_force_param_buf; *p; p++)
6402 if (*p == ',')
6403 size++;
6404
6405 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6406 if (!ata_force_tbl) {
6407 printk(KERN_WARNING "ata: failed to extend force table, "
6408 "libata.force ignored\n");
6409 return;
6410 }
6411
6412 /* parse and populate the table */
6413 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6414 const char *reason = "";
6415 struct ata_force_ent te = { .port = -1, .device = -1 };
6416
6417 next = cur;
6418 if (ata_parse_force_one(&next, &te, &reason)) {
6419 printk(KERN_WARNING "ata: failed to parse force "
6420 "parameter \"%s\" (%s)\n",
6421 cur, reason);
6422 continue;
6423 }
6424
6425 if (te.port == -1) {
6426 te.port = last_port;
6427 te.device = last_device;
6428 }
6429
6430 ata_force_tbl[idx++] = te;
6431
6432 last_port = te.port;
6433 last_device = te.device;
6434 }
6435
6436 ata_force_tbl_size = idx;
6437}
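For reference, a few libata.force strings the parser above accepts (the value names come from force_tbl; the port and device numbers are arbitrary):

	libata.force=3.0Gbps			apply to all ports
	libata.force=1:noncq			port 1, all devices
	libata.force=2.00:udma/33,3:norst	port 2 device 0, then port 3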
1da177e4 6438
1da177e4
LT
6439static int __init ata_init(void)
6440{
d9027470 6441 int rc;
270390e1 6442
33267325
TH
6443 ata_parse_force_param();
6444
270390e1 6445 rc = ata_sff_init();
ad72cf98
TH
6446 if (rc) {
6447 kfree(ata_force_tbl);
6448 return rc;
6449 }
453b07ac 6450
d9027470
GG
6451 libata_transport_init();
6452 ata_scsi_transport_template = ata_attach_transport();
6453 if (!ata_scsi_transport_template) {
6454 ata_sff_exit();
6455 rc = -ENOMEM;
6456 goto err_out;
4fca377f 6457 }
d9027470 6458
1da177e4
LT
6459 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6460 return 0;
d9027470
GG
6461
6462err_out:
6463 return rc;
1da177e4
LT
6464}
6465
6466static void __exit ata_exit(void)
6467{
d9027470
GG
6468 ata_release_transport(ata_scsi_transport_template);
6469 libata_transport_exit();
270390e1 6470 ata_sff_exit();
33267325 6471 kfree(ata_force_tbl);
1da177e4
LT
6472}
6473
a4625085 6474subsys_initcall(ata_init);
1da177e4
LT
6475module_exit(ata_exit);
6476
9990b6f3 6477static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
67846b30
JG
6478
6479int ata_ratelimit(void)
6480{
9990b6f3 6481 return __ratelimit(&ratelimit);
67846b30
JG
6482}
6483
c0c362b6
TH
6484/**
6485 * ata_msleep - ATA EH owner aware msleep
6486 * @ap: ATA port to attribute the sleep to
6487 * @msecs: duration to sleep in milliseconds
6488 *
6489 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6490 * ownership is released before going to sleep and reacquired
6491 * after the sleep is complete. IOW, other ports sharing the
6492 * @ap->host will be allowed to own the EH while this task is
6493 * sleeping.
6494 *
6495 * LOCKING:
6496 * Might sleep.
6497 */
97750ceb
TH
6498void ata_msleep(struct ata_port *ap, unsigned int msecs)
6499{
c0c362b6
TH
6500 bool owns_eh = ap && ap->host->eh_owner == current;
6501
6502 if (owns_eh)
6503 ata_eh_release(ap);
6504
97750ceb 6505 msleep(msecs);
c0c362b6
TH
6506
6507 if (owns_eh)
6508 ata_eh_acquire(ap);
97750ceb
TH
6509}
6510
c22daff4
TH
6511/**
6512 * ata_wait_register - wait until register value changes
97750ceb 6513 * @ap: ATA port to wait register for, can be NULL
c22daff4
TH
6514 * @reg: IO-mapped register
6515 * @mask: Mask to apply to read register value
6516 * @val: Wait condition
341c2c95
TH
6517 * @interval: polling interval in milliseconds
6518 * @timeout: timeout in milliseconds
c22daff4
TH
6519 *
6520 * Waiting for some bits of register to change is a common
6521 * operation for ATA controllers. This function reads 32bit LE
6522 * IO-mapped register @reg and tests for the following condition.
6523 *
6524 * (*@reg & mask) != val
6525 *
6526 * If the condition is met, it returns; otherwise, the process is
6527 * repeated after @interval until timeout.
6528 *
6529 * LOCKING:
6530 * Kernel thread context (may sleep)
6531 *
6532 * RETURNS:
6533 * The final register value.
6534 */
97750ceb 6535u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
341c2c95 6536 unsigned long interval, unsigned long timeout)
c22daff4 6537{
341c2c95 6538 unsigned long deadline;
c22daff4
TH
6539 u32 tmp;
6540
6541 tmp = ioread32(reg);
6542
6543 /* Calculate timeout _after_ the first read to make sure
6544 * preceding writes reach the controller before starting to
6545 * eat away the timeout.
6546 */
341c2c95 6547 deadline = ata_deadline(jiffies, timeout);
c22daff4 6548
341c2c95 6549 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
97750ceb 6550 ata_msleep(ap, interval);
c22daff4
TH
6551 tmp = ioread32(reg);
6552 }
6553
6554 return tmp;
6555}
6556
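A usage sketch: polling a busy bit in a memory-mapped register until it clears. The register pointer and bit position are placeholders; the millisecond interval/timeout convention is the one documented above.

static u32 example_wait_not_busy(struct ata_port *ap, void __iomem *mmio)
{
	/* poll every 10ms, up to 1s, until bit 0 reads back as 0 */
	return ata_wait_register(ap, mmio, 0x1, 0x1, 10, 1000);
}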
dd5b06c4
TH
6557/*
6558 * Dummy port_ops
6559 */
182d7bba 6560static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
dd5b06c4 6561{
182d7bba 6562 return AC_ERR_SYSTEM;
dd5b06c4
TH
6563}
6564
182d7bba 6565static void ata_dummy_error_handler(struct ata_port *ap)
dd5b06c4 6566{
182d7bba 6567 /* truly dummy */
dd5b06c4
TH
6568}
6569
029cfd6b 6570struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
6571 .qc_prep = ata_noop_qc_prep,
6572 .qc_issue = ata_dummy_qc_issue,
182d7bba 6573 .error_handler = ata_dummy_error_handler,
dd5b06c4
TH
6574};
6575
21b0ad4f
TH
6576const struct ata_port_info ata_dummy_port_info = {
6577 .port_ops = &ata_dummy_port_ops,
6578};
6579
a9a79dfe
JP
6580/*
6581 * Utility print functions
6582 */
6583int ata_port_printk(const struct ata_port *ap, const char *level,
6584 const char *fmt, ...)
6585{
6586 struct va_format vaf;
6587 va_list args;
6588 int r;
6589
6590 va_start(args, fmt);
6591
6592 vaf.fmt = fmt;
6593 vaf.va = &args;
6594
6595 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6596
6597 va_end(args);
6598
6599 return r;
6600}
6601EXPORT_SYMBOL(ata_port_printk);
6602
6603int ata_link_printk(const struct ata_link *link, const char *level,
6604 const char *fmt, ...)
6605{
6606 struct va_format vaf;
6607 va_list args;
6608 int r;
6609
6610 va_start(args, fmt);
6611
6612 vaf.fmt = fmt;
6613 vaf.va = &args;
6614
6615 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6616 r = printk("%sata%u.%02u: %pV",
6617 level, link->ap->print_id, link->pmp, &vaf);
6618 else
6619 r = printk("%sata%u: %pV",
6620 level, link->ap->print_id, &vaf);
6621
6622 va_end(args);
6623
6624 return r;
6625}
6626EXPORT_SYMBOL(ata_link_printk);
6627
6628int ata_dev_printk(const struct ata_device *dev, const char *level,
6629 const char *fmt, ...)
6630{
6631 struct va_format vaf;
6632 va_list args;
6633 int r;
6634
6635 va_start(args, fmt);
6636
6637 vaf.fmt = fmt;
6638 vaf.va = &args;
6639
6640 r = printk("%sata%u.%02u: %pV",
6641 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6642 &vaf);
6643
6644 va_end(args);
6645
6646 return r;
6647}
6648EXPORT_SYMBOL(ata_dev_printk);
6649
06296a1e
JP
6650void ata_print_version(const struct device *dev, const char *version)
6651{
6652 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6653}
6654EXPORT_SYMBOL(ata_print_version);
6655
1da177e4
LT
6656/*
6657 * libata is essentially a library of internal helper functions for
6658 * low-level ATA host controller drivers. As such, the API/ABI is
6659 * likely to change as new drivers are added and updated.
6660 * Do not depend on ABI/API stability.
6661 */
e9c83914
TH
6662EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6663EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6664EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
6665EXPORT_SYMBOL_GPL(ata_base_port_ops);
6666EXPORT_SYMBOL_GPL(sata_port_ops);
dd5b06c4 6667EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6668EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1eca4365
TH
6669EXPORT_SYMBOL_GPL(ata_link_next);
6670EXPORT_SYMBOL_GPL(ata_dev_next);
1da177e4 6671EXPORT_SYMBOL_GPL(ata_std_bios_param);
d8d9129e 6672EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
cca3974e 6673EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6674EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6675EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
b1c72916 6676EXPORT_SYMBOL_GPL(ata_slave_link_init);
ecef7253 6677EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6678EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6679EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6680EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6681EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 6682EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6683EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
436d34b3 6684EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
6685EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6686EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
6687EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6688EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6689EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6690EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6691EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6692EXPORT_SYMBOL_GPL(ata_mode_string);
6693EXPORT_SYMBOL_GPL(ata_id_xfermask);
04351821 6694EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 6695EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 6696EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
10305f0f 6697EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6698EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 6699EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
6700EXPORT_SYMBOL_GPL(sata_link_debounce);
6701EXPORT_SYMBOL_GPL(sata_link_resume);
1152b261 6702EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
0aa1113d 6703EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 6704EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 6705EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 6706EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6707EXPORT_SYMBOL_GPL(ata_dev_classify);
6708EXPORT_SYMBOL_GPL(ata_dev_pair);
67846b30 6709EXPORT_SYMBOL_GPL(ata_ratelimit);
97750ceb 6710EXPORT_SYMBOL_GPL(ata_msleep);
c22daff4 6711EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4 6712EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6713EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6714EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6715EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
34bf2170
TH
6716EXPORT_SYMBOL_GPL(sata_scr_valid);
6717EXPORT_SYMBOL_GPL(sata_scr_read);
6718EXPORT_SYMBOL_GPL(sata_scr_write);
6719EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
6720EXPORT_SYMBOL_GPL(ata_link_online);
6721EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 6722#ifdef CONFIG_PM
cca3974e
JG
6723EXPORT_SYMBOL_GPL(ata_host_suspend);
6724EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6725#endif /* CONFIG_PM */
6a62a04d
TH
6726EXPORT_SYMBOL_GPL(ata_id_string);
6727EXPORT_SYMBOL_GPL(ata_id_c_string);
963e4975 6728EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1da177e4
LT
6729EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6730
1bc4ccff 6731EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 6732EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
6733EXPORT_SYMBOL_GPL(ata_timing_compute);
6734EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 6735EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 6736
1da177e4
LT
6737#ifdef CONFIG_PCI
6738EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 6739EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6740#ifdef CONFIG_PM
500530f6
TH
6741EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6742EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6743EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6744EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6745#endif /* CONFIG_PM */
1da177e4 6746#endif /* CONFIG_PCI */
9b847548 6747
b64bbc39
TH
6748EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6749EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6750EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
6751EXPORT_SYMBOL_GPL(ata_port_desc);
6752#ifdef CONFIG_PCI
6753EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6754#endif /* CONFIG_PCI */
7b70fc03 6755EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 6756EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 6757EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6758EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 6759EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
6760EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6761EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6762EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6763EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
10acf3b0 6764EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
022bdb07 6765EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 6766EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
6767
6768EXPORT_SYMBOL_GPL(ata_cable_40wire);
6769EXPORT_SYMBOL_GPL(ata_cable_80wire);
6770EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 6771EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 6772EXPORT_SYMBOL_GPL(ata_cable_sata);