[libata] Add ATA transport class
drivers/ata/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69
70 #include "libata.h"
71 #include "libata-transport.h"
72
73 /* debounce timing parameters in msecs { interval, duration, timeout } */
74 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
75 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
76 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
77
78 const struct ata_port_operations ata_base_port_ops = {
79 .prereset = ata_std_prereset,
80 .postreset = ata_std_postreset,
81 .error_handler = ata_std_error_handler,
82 };
83
84 const struct ata_port_operations sata_port_ops = {
85 .inherits = &ata_base_port_ops,
86
87 .qc_defer = ata_std_qc_defer,
88 .hardreset = sata_std_hardreset,
89 };
90
91 static unsigned int ata_dev_init_params(struct ata_device *dev,
92 u16 heads, u16 sectors);
93 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
94 static unsigned int ata_dev_set_feature(struct ata_device *dev,
95 u8 enable, u8 feature);
96 static void ata_dev_xfermask(struct ata_device *dev);
97 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
98
99 unsigned int ata_print_id = 1;
100
101 struct ata_force_param {
102 const char *name;
103 unsigned int cbl;
104 int spd_limit;
105 unsigned long xfer_mask;
106 unsigned int horkage_on;
107 unsigned int horkage_off;
108 unsigned int lflags;
109 };
110
111 struct ata_force_ent {
112 int port;
113 int device;
114 struct ata_force_param param;
115 };
116
117 static struct ata_force_ent *ata_force_tbl;
118 static int ata_force_tbl_size;
119
120 static char ata_force_param_buf[PAGE_SIZE] __initdata;
121 /* param_buf is thrown away after initialization, disallow read */
122 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
123 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
124
125 static int atapi_enabled = 1;
126 module_param(atapi_enabled, int, 0444);
127 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
128
129 static int atapi_dmadir = 0;
130 module_param(atapi_dmadir, int, 0444);
131 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
132
133 int atapi_passthru16 = 1;
134 module_param(atapi_passthru16, int, 0444);
135 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
136
137 int libata_fua = 0;
138 module_param_named(fua, libata_fua, int, 0444);
139 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
140
141 static int ata_ignore_hpa;
142 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
143 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
144
145 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
146 module_param_named(dma, libata_dma_mask, int, 0444);
147 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
148
149 static int ata_probe_timeout;
150 module_param(ata_probe_timeout, int, 0444);
151 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
152
153 int libata_noacpi = 0;
154 module_param_named(noacpi, libata_noacpi, int, 0444);
155 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
156
157 int libata_allow_tpm = 0;
158 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
159 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
160
161 static int atapi_an;
162 module_param(atapi_an, int, 0444);
163 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
164
165 MODULE_AUTHOR("Jeff Garzik");
166 MODULE_DESCRIPTION("Library module for ATA devices");
167 MODULE_LICENSE("GPL");
168 MODULE_VERSION(DRV_VERSION);
169
170
171 static bool ata_sstatus_online(u32 sstatus)
172 {
173 return (sstatus & 0xf) == 0x3;
174 }
175
176 /**
177 * ata_link_next - link iteration helper
178 * @link: the previous link, NULL to start
179 * @ap: ATA port containing links to iterate
180 * @mode: iteration mode, one of ATA_LITER_*
181 *
182 * LOCKING:
183 * Host lock or EH context.
184 *
185 * RETURNS:
186 * Pointer to the next link.
187 */
188 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
189 enum ata_link_iter_mode mode)
190 {
191 BUG_ON(mode != ATA_LITER_EDGE &&
192 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
193
194 /* NULL link indicates start of iteration */
195 if (!link)
196 switch (mode) {
197 case ATA_LITER_EDGE:
198 case ATA_LITER_PMP_FIRST:
199 if (sata_pmp_attached(ap))
200 return ap->pmp_link;
201 /* fall through */
202 case ATA_LITER_HOST_FIRST:
203 return &ap->link;
204 }
205
206 /* we just iterated over the host link, what's next? */
207 if (link == &ap->link)
208 switch (mode) {
209 case ATA_LITER_HOST_FIRST:
210 if (sata_pmp_attached(ap))
211 return ap->pmp_link;
212 /* fall through */
213 case ATA_LITER_PMP_FIRST:
214 if (unlikely(ap->slave_link))
215 return ap->slave_link;
216 /* fall through */
217 case ATA_LITER_EDGE:
218 return NULL;
219 }
220
221 /* slave_link excludes PMP */
222 if (unlikely(link == ap->slave_link))
223 return NULL;
224
225 /* we were over a PMP link */
226 if (++link < ap->pmp_link + ap->nr_pmp_links)
227 return link;
228
229 if (mode == ATA_LITER_PMP_FIRST)
230 return &ap->link;
231
232 return NULL;
233 }
234
235 /**
236 * ata_dev_next - device iteration helper
237 * @dev: the previous device, NULL to start
238 * @link: ATA link containing devices to iterate
239 * @mode: iteration mode, one of ATA_DITER_*
240 *
241 * LOCKING:
242 * Host lock or EH context.
243 *
244 * RETURNS:
245 * Pointer to the next device.
246 */
247 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
248 enum ata_dev_iter_mode mode)
249 {
250 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
251 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
252
253 /* NULL dev indicates start of iteration */
254 if (!dev)
255 switch (mode) {
256 case ATA_DITER_ENABLED:
257 case ATA_DITER_ALL:
258 dev = link->device;
259 goto check;
260 case ATA_DITER_ENABLED_REVERSE:
261 case ATA_DITER_ALL_REVERSE:
262 dev = link->device + ata_link_max_devices(link) - 1;
263 goto check;
264 }
265
266 next:
267 /* move to the next one */
268 switch (mode) {
269 case ATA_DITER_ENABLED:
270 case ATA_DITER_ALL:
271 if (++dev < link->device + ata_link_max_devices(link))
272 goto check;
273 return NULL;
274 case ATA_DITER_ENABLED_REVERSE:
275 case ATA_DITER_ALL_REVERSE:
276 if (--dev >= link->device)
277 goto check;
278 return NULL;
279 }
280
281 check:
282 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
283 !ata_dev_enabled(dev))
284 goto next;
285 return dev;
286 }
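/*
 * Example (illustrative sketch): callers normally use the
 * ata_for_each_link()/ata_for_each_dev() wrappers from <linux/libata.h>,
 * which are built on the two iterators above, e.g. to visit every
 * enabled device behind a port:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_printk(dev, KERN_INFO, "device enabled\n");
 */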
287
288 /**
289 * ata_dev_phys_link - find physical link for a device
290 * @dev: ATA device to look up physical link for
291 *
292 * Look up physical link which @dev is attached to. Note that
293  * this is different from @dev->link only when @dev is on the slave
294 * link. For all other cases, it's the same as @dev->link.
295 *
296 * LOCKING:
297 * Don't care.
298 *
299 * RETURNS:
300 * Pointer to the found physical link.
301 */
302 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
303 {
304 struct ata_port *ap = dev->link->ap;
305
306 if (!ap->slave_link)
307 return dev->link;
308 if (!dev->devno)
309 return &ap->link;
310 return ap->slave_link;
311 }
312
313 /**
314 * ata_force_cbl - force cable type according to libata.force
315 * @ap: ATA port of interest
316 *
317 * Force cable type according to libata.force and whine about it.
318 * The last entry which has matching port number is used, so it
319 * can be specified as part of device force parameters. For
320 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
321 * same effect.
322 *
323 * LOCKING:
324 * EH context.
325 */
326 void ata_force_cbl(struct ata_port *ap)
327 {
328 int i;
329
330 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
331 const struct ata_force_ent *fe = &ata_force_tbl[i];
332
333 if (fe->port != -1 && fe->port != ap->print_id)
334 continue;
335
336 if (fe->param.cbl == ATA_CBL_NONE)
337 continue;
338
339 ap->cbl = fe->param.cbl;
340 ata_port_printk(ap, KERN_NOTICE,
341 "FORCE: cable set to %s\n", fe->param.name);
342 return;
343 }
344 }
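/*
 * Example (illustrative; syntax per the comment above and
 * Documentation/kernel-parameters.txt): forcing a 40-wire cable and
 * UDMA/66 on device 0 of port 1 from the kernel command line:
 *
 *	libata.force=1.00:40c,udma4
 */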
345
346 /**
347 * ata_force_link_limits - force link limits according to libata.force
348 * @link: ATA link of interest
349 *
350 * Force link flags and SATA spd limit according to libata.force
351 * and whine about it. When only the port part is specified
352 * (e.g. 1:), the limit applies to all links connected to both
353 * the host link and all fan-out ports connected via PMP. If the
354 * device part is specified as 0 (e.g. 1.00:), it specifies the
355  * first fan-out link, not the host link. Device number 15 always
356 * points to the host link whether PMP is attached or not. If the
357  * controller has a slave link, device number 16 points to it.
358 *
359 * LOCKING:
360 * EH context.
361 */
362 static void ata_force_link_limits(struct ata_link *link)
363 {
364 bool did_spd = false;
365 int linkno = link->pmp;
366 int i;
367
368 if (ata_is_host_link(link))
369 linkno += 15;
370
371 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
372 const struct ata_force_ent *fe = &ata_force_tbl[i];
373
374 if (fe->port != -1 && fe->port != link->ap->print_id)
375 continue;
376
377 if (fe->device != -1 && fe->device != linkno)
378 continue;
379
380 /* only honor the first spd limit */
381 if (!did_spd && fe->param.spd_limit) {
382 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
383 ata_link_printk(link, KERN_NOTICE,
384 "FORCE: PHY spd limit set to %s\n",
385 fe->param.name);
386 did_spd = true;
387 }
388
389 /* let lflags stack */
390 if (fe->param.lflags) {
391 link->flags |= fe->param.lflags;
392 ata_link_printk(link, KERN_NOTICE,
393 "FORCE: link flag 0x%x forced -> 0x%x\n",
394 fe->param.lflags, link->flags);
395 }
396 }
397 }
398
399 /**
400 * ata_force_xfermask - force xfermask according to libata.force
401 * @dev: ATA device of interest
402 *
403 * Force xfer_mask according to libata.force and whine about it.
404 * For consistency with link selection, device number 15 selects
405 * the first device connected to the host link.
406 *
407 * LOCKING:
408 * EH context.
409 */
410 static void ata_force_xfermask(struct ata_device *dev)
411 {
412 int devno = dev->link->pmp + dev->devno;
413 int alt_devno = devno;
414 int i;
415
416 /* allow n.15/16 for devices attached to host port */
417 if (ata_is_host_link(dev->link))
418 alt_devno += 15;
419
420 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
421 const struct ata_force_ent *fe = &ata_force_tbl[i];
422 unsigned long pio_mask, mwdma_mask, udma_mask;
423
424 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
425 continue;
426
427 if (fe->device != -1 && fe->device != devno &&
428 fe->device != alt_devno)
429 continue;
430
431 if (!fe->param.xfer_mask)
432 continue;
433
434 ata_unpack_xfermask(fe->param.xfer_mask,
435 &pio_mask, &mwdma_mask, &udma_mask);
436 if (udma_mask)
437 dev->udma_mask = udma_mask;
438 else if (mwdma_mask) {
439 dev->udma_mask = 0;
440 dev->mwdma_mask = mwdma_mask;
441 } else {
442 dev->udma_mask = 0;
443 dev->mwdma_mask = 0;
444 dev->pio_mask = pio_mask;
445 }
446
447 ata_dev_printk(dev, KERN_NOTICE,
448 "FORCE: xfer_mask set to %s\n", fe->param.name);
449 return;
450 }
451 }
452
453 /**
454 * ata_force_horkage - force horkage according to libata.force
455 * @dev: ATA device of interest
456 *
457 * Force horkage according to libata.force and whine about it.
458 * For consistency with link selection, device number 15 selects
459 * the first device connected to the host link.
460 *
461 * LOCKING:
462 * EH context.
463 */
464 static void ata_force_horkage(struct ata_device *dev)
465 {
466 int devno = dev->link->pmp + dev->devno;
467 int alt_devno = devno;
468 int i;
469
470 /* allow n.15/16 for devices attached to host port */
471 if (ata_is_host_link(dev->link))
472 alt_devno += 15;
473
474 for (i = 0; i < ata_force_tbl_size; i++) {
475 const struct ata_force_ent *fe = &ata_force_tbl[i];
476
477 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
478 continue;
479
480 if (fe->device != -1 && fe->device != devno &&
481 fe->device != alt_devno)
482 continue;
483
484 if (!(~dev->horkage & fe->param.horkage_on) &&
485 !(dev->horkage & fe->param.horkage_off))
486 continue;
487
488 dev->horkage |= fe->param.horkage_on;
489 dev->horkage &= ~fe->param.horkage_off;
490
491 ata_dev_printk(dev, KERN_NOTICE,
492 "FORCE: horkage modified (%s)\n", fe->param.name);
493 }
494 }
495
496 /**
497 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
498 * @opcode: SCSI opcode
499 *
500 * Determine ATAPI command type from @opcode.
501 *
502 * LOCKING:
503 * None.
504 *
505 * RETURNS:
506 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
507 */
508 int atapi_cmd_type(u8 opcode)
509 {
510 switch (opcode) {
511 case GPCMD_READ_10:
512 case GPCMD_READ_12:
513 return ATAPI_READ;
514
515 case GPCMD_WRITE_10:
516 case GPCMD_WRITE_12:
517 case GPCMD_WRITE_AND_VERIFY_10:
518 return ATAPI_WRITE;
519
520 case GPCMD_READ_CD:
521 case GPCMD_READ_CD_MSF:
522 return ATAPI_READ_CD;
523
524 case ATA_16:
525 case ATA_12:
526 if (atapi_passthru16)
527 return ATAPI_PASS_THRU;
528 /* fall thru */
529 default:
530 return ATAPI_MISC;
531 }
532 }
533
534 /**
535 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
536 * @tf: Taskfile to convert
537 * @pmp: Port multiplier port
538 * @is_cmd: This FIS is for command
539  * @fis: Buffer into which data will be output
540 *
541 * Converts a standard ATA taskfile to a Serial ATA
542 * FIS structure (Register - Host to Device).
543 *
544 * LOCKING:
545 * Inherited from caller.
546 */
547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
548 {
549 fis[0] = 0x27; /* Register - Host to Device FIS */
550 fis[1] = pmp & 0xf; /* Port multiplier number*/
551 if (is_cmd)
552 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
553
554 fis[2] = tf->command;
555 fis[3] = tf->feature;
556
557 fis[4] = tf->lbal;
558 fis[5] = tf->lbam;
559 fis[6] = tf->lbah;
560 fis[7] = tf->device;
561
562 fis[8] = tf->hob_lbal;
563 fis[9] = tf->hob_lbam;
564 fis[10] = tf->hob_lbah;
565 fis[11] = tf->hob_feature;
566
567 fis[12] = tf->nsect;
568 fis[13] = tf->hob_nsect;
569 fis[14] = 0;
570 fis[15] = tf->ctl;
571
572 fis[16] = 0;
573 fis[17] = 0;
574 fis[18] = 0;
575 fis[19] = 0;
576 }
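/*
 * Example (illustrative sketch): SATA drivers typically build the
 * command FIS for a queued command @qc straight into their command
 * buffer:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * Passing is_cmd=1 sets the C bit (bit 7 of byte 1), marking the FIS
 * as carrying a command rather than a device control update.
 */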
577
578 /**
579 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
580 * @fis: Buffer from which data will be input
581 * @tf: Taskfile to output
582 *
583 * Converts a serial ATA FIS structure to a standard ATA taskfile.
584 *
585 * LOCKING:
586 * Inherited from caller.
587 */
588
589 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
590 {
591 tf->command = fis[2]; /* status */
592 tf->feature = fis[3]; /* error */
593
594 tf->lbal = fis[4];
595 tf->lbam = fis[5];
596 tf->lbah = fis[6];
597 tf->device = fis[7];
598
599 tf->hob_lbal = fis[8];
600 tf->hob_lbam = fis[9];
601 tf->hob_lbah = fis[10];
602
603 tf->nsect = fis[12];
604 tf->hob_nsect = fis[13];
605 }
606
607 static const u8 ata_rw_cmds[] = {
608 /* pio multi */
609 ATA_CMD_READ_MULTI,
610 ATA_CMD_WRITE_MULTI,
611 ATA_CMD_READ_MULTI_EXT,
612 ATA_CMD_WRITE_MULTI_EXT,
613 0,
614 0,
615 0,
616 ATA_CMD_WRITE_MULTI_FUA_EXT,
617 /* pio */
618 ATA_CMD_PIO_READ,
619 ATA_CMD_PIO_WRITE,
620 ATA_CMD_PIO_READ_EXT,
621 ATA_CMD_PIO_WRITE_EXT,
622 0,
623 0,
624 0,
625 0,
626 /* dma */
627 ATA_CMD_READ,
628 ATA_CMD_WRITE,
629 ATA_CMD_READ_EXT,
630 ATA_CMD_WRITE_EXT,
631 0,
632 0,
633 0,
634 ATA_CMD_WRITE_FUA_EXT
635 };
636
637 /**
638 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
639 * @tf: command to examine and configure
640 * @dev: device tf belongs to
641 *
642 * Examine the device configuration and tf->flags to calculate
643 * the proper read/write commands and protocol to use.
644 *
645 * LOCKING:
646 * caller.
647 */
648 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
649 {
650 u8 cmd;
651
652 int index, fua, lba48, write;
653
654 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
655 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
656 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
657
658 if (dev->flags & ATA_DFLAG_PIO) {
659 tf->protocol = ATA_PROT_PIO;
660 index = dev->multi_count ? 0 : 8;
661 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
662 /* Unable to use DMA due to host limitation */
663 tf->protocol = ATA_PROT_PIO;
664 index = dev->multi_count ? 0 : 8;
665 } else {
666 tf->protocol = ATA_PROT_DMA;
667 index = 16;
668 }
669
670 cmd = ata_rw_cmds[index + fua + lba48 + write];
671 if (cmd) {
672 tf->command = cmd;
673 return 0;
674 }
675 return -1;
676 }
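/*
 * Worked example: ata_rw_cmds[] above is indexed by
 * index + fua + lba48 + write. A DMA, LBA48, FUA write therefore uses
 * entry 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT. Zero entries
 * mark combinations with no valid command (e.g. FUA without LBA48),
 * for which ata_rwcmd_protocol() returns -1.
 */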
677
678 /**
679 * ata_tf_read_block - Read block address from ATA taskfile
680 * @tf: ATA taskfile of interest
681 * @dev: ATA device @tf belongs to
682 *
683 * LOCKING:
684 * None.
685 *
686 * Read block address from @tf. This function can handle all
687 * three address formats - LBA, LBA48 and CHS. tf->protocol and
688 * flags select the address format to use.
689 *
690 * RETURNS:
691 * Block address read from @tf.
692 */
693 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
694 {
695 u64 block = 0;
696
697 if (tf->flags & ATA_TFLAG_LBA) {
698 if (tf->flags & ATA_TFLAG_LBA48) {
699 block |= (u64)tf->hob_lbah << 40;
700 block |= (u64)tf->hob_lbam << 32;
701 block |= (u64)tf->hob_lbal << 24;
702 } else
703 block |= (tf->device & 0xf) << 24;
704
705 block |= tf->lbah << 16;
706 block |= tf->lbam << 8;
707 block |= tf->lbal;
708 } else {
709 u32 cyl, head, sect;
710
711 cyl = tf->lbam | (tf->lbah << 8);
712 head = tf->device & 0xf;
713 sect = tf->lbal;
714
715 if (!sect) {
716 ata_dev_printk(dev, KERN_WARNING, "device reported "
717 "invalid CHS sector 0\n");
718 sect = 1; /* oh well */
719 }
720
721 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
722 }
723
724 return block;
725 }
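/*
 * Worked example for the CHS branch above: with a 16-head, 63-sector
 * geometry, cylinder 2 / head 5 / sector 10 decodes to
 * (2 * 16 + 5) * 63 + 10 - 1 = 2340, the usual
 * LBA = (C * heads + H) * sectors + S - 1 conversion.
 */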
726
727 /**
728 * ata_build_rw_tf - Build ATA taskfile for given read/write request
729 * @tf: Target ATA taskfile
730 * @dev: ATA device @tf belongs to
731 * @block: Block address
732 * @n_block: Number of blocks
733 * @tf_flags: RW/FUA etc...
734 * @tag: tag
735 *
736 * LOCKING:
737 * None.
738 *
739 * Build ATA taskfile @tf for read/write request described by
740 * @block, @n_block, @tf_flags and @tag on @dev.
741 *
742 * RETURNS:
743 *
744 * 0 on success, -ERANGE if the request is too large for @dev,
745 * -EINVAL if the request is invalid.
746 */
747 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
748 u64 block, u32 n_block, unsigned int tf_flags,
749 unsigned int tag)
750 {
751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
752 tf->flags |= tf_flags;
753
754 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
755 /* yay, NCQ */
756 if (!lba_48_ok(block, n_block))
757 return -ERANGE;
758
759 tf->protocol = ATA_PROT_NCQ;
760 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
761
762 if (tf->flags & ATA_TFLAG_WRITE)
763 tf->command = ATA_CMD_FPDMA_WRITE;
764 else
765 tf->command = ATA_CMD_FPDMA_READ;
766
767 tf->nsect = tag << 3;
768 tf->hob_feature = (n_block >> 8) & 0xff;
769 tf->feature = n_block & 0xff;
770
771 tf->hob_lbah = (block >> 40) & 0xff;
772 tf->hob_lbam = (block >> 32) & 0xff;
773 tf->hob_lbal = (block >> 24) & 0xff;
774 tf->lbah = (block >> 16) & 0xff;
775 tf->lbam = (block >> 8) & 0xff;
776 tf->lbal = block & 0xff;
777
778 tf->device = 1 << 6;
779 if (tf->flags & ATA_TFLAG_FUA)
780 tf->device |= 1 << 7;
781 } else if (dev->flags & ATA_DFLAG_LBA) {
782 tf->flags |= ATA_TFLAG_LBA;
783
784 if (lba_28_ok(block, n_block)) {
785 /* use LBA28 */
786 tf->device |= (block >> 24) & 0xf;
787 } else if (lba_48_ok(block, n_block)) {
788 if (!(dev->flags & ATA_DFLAG_LBA48))
789 return -ERANGE;
790
791 /* use LBA48 */
792 tf->flags |= ATA_TFLAG_LBA48;
793
794 tf->hob_nsect = (n_block >> 8) & 0xff;
795
796 tf->hob_lbah = (block >> 40) & 0xff;
797 tf->hob_lbam = (block >> 32) & 0xff;
798 tf->hob_lbal = (block >> 24) & 0xff;
799 } else
800 /* request too large even for LBA48 */
801 return -ERANGE;
802
803 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
804 return -EINVAL;
805
806 tf->nsect = n_block & 0xff;
807
808 tf->lbah = (block >> 16) & 0xff;
809 tf->lbam = (block >> 8) & 0xff;
810 tf->lbal = block & 0xff;
811
812 tf->device |= ATA_LBA;
813 } else {
814 /* CHS */
815 u32 sect, head, cyl, track;
816
817 /* The request -may- be too large for CHS addressing. */
818 if (!lba_28_ok(block, n_block))
819 return -ERANGE;
820
821 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
822 return -EINVAL;
823
824 /* Convert LBA to CHS */
825 track = (u32)block / dev->sectors;
826 cyl = track / dev->heads;
827 head = track % dev->heads;
828 sect = (u32)block % dev->sectors + 1;
829
830 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
831 (u32)block, track, cyl, head, sect);
832
833 /* Check whether the converted CHS can fit.
834 Cylinder: 0-65535
835 Head: 0-15
836 	   Sector: 1-255 */
837 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
838 return -ERANGE;
839
840 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
841 tf->lbal = sect;
842 tf->lbam = cyl;
843 tf->lbah = cyl >> 8;
844 tf->device |= head;
845 }
846
847 return 0;
848 }
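/*
 * Example (illustrative sketch, modelled on the libata-scsi.c caller):
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block,
 *			     tf_flags, qc->tag);
 *
 * -ERANGE means the request cannot be addressed by the device and
 * -EINVAL means no suitable command/protocol could be selected, as
 * documented above.
 */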
849
850 /**
851 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
852 * @pio_mask: pio_mask
853 * @mwdma_mask: mwdma_mask
854 * @udma_mask: udma_mask
855 *
856 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
857 * unsigned int xfer_mask.
858 *
859 * LOCKING:
860 * None.
861 *
862 * RETURNS:
863 * Packed xfer_mask.
864 */
865 unsigned long ata_pack_xfermask(unsigned long pio_mask,
866 unsigned long mwdma_mask,
867 unsigned long udma_mask)
868 {
869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
872 }
873
874 /**
875 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
876 * @xfer_mask: xfer_mask to unpack
877 * @pio_mask: resulting pio_mask
878 * @mwdma_mask: resulting mwdma_mask
879 * @udma_mask: resulting udma_mask
880 *
881 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
882  *	Any NULL destination masks will be ignored.
883 */
884 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
885 unsigned long *mwdma_mask, unsigned long *udma_mask)
886 {
887 if (pio_mask)
888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
889 if (mwdma_mask)
890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
891 if (udma_mask)
892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
893 }
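/*
 * Example: the two helpers above are inverses. A driver advertising
 * PIO4, MWDMA2 and UDMA6 could pack its masks as
 *
 *	unsigned long xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
 *						    ATA_UDMA6);
 *
 * and ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma) recovers the
 * individual masks (ATA_PIO4 etc. are the cumulative mode masks from
 * <linux/ata.h>).
 */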
894
895 static const struct ata_xfer_ent {
896 int shift, bits;
897 u8 base;
898 } ata_xfer_tbl[] = {
899 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
900 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
901 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
902 { -1, },
903 };
904
905 /**
906 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
907 * @xfer_mask: xfer_mask of interest
908 *
909 * Return matching XFER_* value for @xfer_mask. Only the highest
910 * bit of @xfer_mask is considered.
911 *
912 * LOCKING:
913 * None.
914 *
915 * RETURNS:
916 * Matching XFER_* value, 0xff if no match found.
917 */
918 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
919 {
920 int highbit = fls(xfer_mask) - 1;
921 const struct ata_xfer_ent *ent;
922
923 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
924 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
925 return ent->base + highbit - ent->shift;
926 return 0xff;
927 }
928
929 /**
930 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
931 * @xfer_mode: XFER_* of interest
932 *
933 * Return matching xfer_mask for @xfer_mode.
934 *
935 * LOCKING:
936 * None.
937 *
938 * RETURNS:
939 * Matching xfer_mask, 0 if no match found.
940 */
941 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
942 {
943 const struct ata_xfer_ent *ent;
944
945 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
946 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
947 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
948 & ~((1 << ent->shift) - 1);
949 return 0;
950 }
951
952 /**
953 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
954 * @xfer_mode: XFER_* of interest
955 *
956 * Return matching xfer_shift for @xfer_mode.
957 *
958 * LOCKING:
959 * None.
960 *
961 * RETURNS:
962 * Matching xfer_shift, -1 if no match found.
963 */
964 int ata_xfer_mode2shift(unsigned long xfer_mode)
965 {
966 const struct ata_xfer_ent *ent;
967
968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
970 return ent->shift;
971 return -1;
972 }
973
974 /**
975 * ata_mode_string - convert xfer_mask to string
976 * @xfer_mask: mask of bits supported; only highest bit counts.
977 *
978 * Determine string which represents the highest speed
979  *	(highest bit in @xfer_mask).
980 *
981 * LOCKING:
982 * None.
983 *
984 * RETURNS:
985 * Constant C string representing highest speed listed in
986  *	@xfer_mask, or the constant C string "<n/a>".
987 */
988 const char *ata_mode_string(unsigned long xfer_mask)
989 {
990 static const char * const xfer_mode_str[] = {
991 "PIO0",
992 "PIO1",
993 "PIO2",
994 "PIO3",
995 "PIO4",
996 "PIO5",
997 "PIO6",
998 "MWDMA0",
999 "MWDMA1",
1000 "MWDMA2",
1001 "MWDMA3",
1002 "MWDMA4",
1003 "UDMA/16",
1004 "UDMA/25",
1005 "UDMA/33",
1006 "UDMA/44",
1007 "UDMA/66",
1008 "UDMA/100",
1009 "UDMA/133",
1010 "UDMA7",
1011 };
1012 int highbit;
1013
1014 highbit = fls(xfer_mask) - 1;
1015 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1016 return xfer_mode_str[highbit];
1017 return "<n/a>";
1018 }
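/*
 * Example: to print a device's current transfer mode, convert the
 * XFER_* value back into a mask first:
 *
 *	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
 *		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
 *
 * which yields strings such as "UDMA/133", or "<n/a>" for an unknown
 * mode.
 */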
1019
1020 const char *sata_spd_string(unsigned int spd)
1021 {
1022 static const char * const spd_str[] = {
1023 "1.5 Gbps",
1024 "3.0 Gbps",
1025 "6.0 Gbps",
1026 };
1027
1028 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1029 return "<unknown>";
1030 return spd_str[spd - 1];
1031 }
1032
1033 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1034 {
1035 struct ata_link *link = dev->link;
1036 struct ata_port *ap = link->ap;
1037 u32 scontrol;
1038 unsigned int err_mask;
1039 int rc;
1040
1041 /*
1042 * disallow DIPM for drivers which haven't set
1043 * ATA_FLAG_IPM. This is because when DIPM is enabled,
1044 * phy ready will be set in the interrupt status on
1045 * state changes, which will cause some drivers to
1046 * think there are errors - additionally drivers will
1047 * need to disable hot plug.
1048 */
1049 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1050 ap->pm_policy = NOT_AVAILABLE;
1051 return -EINVAL;
1052 }
1053
1054 /*
1055 * For DIPM, we will only enable it for the
1056 * min_power setting.
1057 *
1058 	 * Why? Because disks are too stupid to know that
1059 	 * if the host rejects a request to go to SLUMBER
1060 	 * they should retry at PARTIAL; instead they
1061 	 * simply give up. So, for medium_power to
1062 * work at all, we need to only allow HIPM.
1063 */
1064 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1065 if (rc)
1066 return rc;
1067
1068 switch (policy) {
1069 case MIN_POWER:
1070 /* no restrictions on IPM transitions */
1071 scontrol &= ~(0x3 << 8);
1072 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1073 if (rc)
1074 return rc;
1075
1076 /* enable DIPM */
1077 if (dev->flags & ATA_DFLAG_DIPM)
1078 err_mask = ata_dev_set_feature(dev,
1079 SETFEATURES_SATA_ENABLE, SATA_DIPM);
1080 break;
1081 case MEDIUM_POWER:
1082 /* allow IPM to PARTIAL */
1083 scontrol &= ~(0x1 << 8);
1084 scontrol |= (0x2 << 8);
1085 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1086 if (rc)
1087 return rc;
1088
1089 /*
1090 * we don't have to disable DIPM since IPM flags
1091 * disallow transitions to SLUMBER, which effectively
1092 		 * disables DIPM if the device does not support PARTIAL
1093 */
1094 break;
1095 case NOT_AVAILABLE:
1096 case MAX_PERFORMANCE:
1097 /* disable all IPM transitions */
1098 scontrol |= (0x3 << 8);
1099 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1100 if (rc)
1101 return rc;
1102
1103 /*
1104 * we don't have to disable DIPM since IPM flags
1105 		 * disallow all transitions, which effectively
1106 		 * disables DIPM anyway.
1107 */
1108 break;
1109 }
1110
1111 /* FIXME: handle SET FEATURES failure */
1112 (void) err_mask;
1113
1114 return 0;
1115 }
1116
1117 /**
1118 * ata_dev_enable_pm - enable SATA interface power management
1119 * @dev: device to enable power management
1120 * @policy: the link power management policy
1121 *
1122 * Enable SATA Interface power management. This will enable
1123 * Device Interface Power Management (DIPM) for min_power
1124 * policy, and then call driver specific callbacks for
1125 * enabling Host Initiated Power management.
1126 *
1127 * Locking: Caller.
1128  *	Returns: void. On failure, the port's pm_policy falls back to MAX_PERFORMANCE.
1129 */
1130 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1131 {
1132 int rc = 0;
1133 struct ata_port *ap = dev->link->ap;
1134
1135 /* set HIPM first, then DIPM */
1136 if (ap->ops->enable_pm)
1137 rc = ap->ops->enable_pm(ap, policy);
1138 if (rc)
1139 goto enable_pm_out;
1140 rc = ata_dev_set_dipm(dev, policy);
1141
1142 enable_pm_out:
1143 if (rc)
1144 ap->pm_policy = MAX_PERFORMANCE;
1145 else
1146 ap->pm_policy = policy;
1147 return /* rc */; /* hopefully we can use 'rc' eventually */
1148 }
1149
1150 #ifdef CONFIG_PM
1151 /**
1152 * ata_dev_disable_pm - disable SATA interface power management
1153 * @dev: device to disable power management
1154 *
1155 * Disable SATA Interface power management. This will disable
1156 * Device Interface Power Management (DIPM) without changing
1157  *	policy, and call driver-specific callbacks for disabling Host
1158 * Initiated Power management.
1159 *
1160 * Locking: Caller.
1161 * Returns: void
1162 */
1163 static void ata_dev_disable_pm(struct ata_device *dev)
1164 {
1165 struct ata_port *ap = dev->link->ap;
1166
1167 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1168 if (ap->ops->disable_pm)
1169 ap->ops->disable_pm(ap);
1170 }
1171 #endif /* CONFIG_PM */
1172
1173 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1174 {
1175 ap->pm_policy = policy;
1176 ap->link.eh_info.action |= ATA_EH_LPM;
1177 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1178 ata_port_schedule_eh(ap);
1179 }
1180
1181 #ifdef CONFIG_PM
1182 static void ata_lpm_enable(struct ata_host *host)
1183 {
1184 struct ata_link *link;
1185 struct ata_port *ap;
1186 struct ata_device *dev;
1187 int i;
1188
1189 for (i = 0; i < host->n_ports; i++) {
1190 ap = host->ports[i];
1191 ata_for_each_link(link, ap, EDGE) {
1192 ata_for_each_dev(dev, link, ALL)
1193 ata_dev_disable_pm(dev);
1194 }
1195 }
1196 }
1197
1198 static void ata_lpm_disable(struct ata_host *host)
1199 {
1200 int i;
1201
1202 for (i = 0; i < host->n_ports; i++) {
1203 struct ata_port *ap = host->ports[i];
1204 ata_lpm_schedule(ap, ap->pm_policy);
1205 }
1206 }
1207 #endif /* CONFIG_PM */
1208
1209 /**
1210 * ata_dev_classify - determine device type based on ATA-spec signature
1211 * @tf: ATA taskfile register set for device to be identified
1212 *
1213 * Determine from taskfile register contents whether a device is
1214 * ATA or ATAPI, as per "Signature and persistence" section
1215 * of ATA/PI spec (volume 1, sect 5.14).
1216 *
1217 * LOCKING:
1218 * None.
1219 *
1220 * RETURNS:
1221 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1222  *	%ATA_DEV_UNKNOWN in the event of failure.
1223 */
1224 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1225 {
1226 /* Apple's open source Darwin code hints that some devices only
1227 * put a proper signature into the LBA mid/high registers,
1228 	 * so we only check those. It's sufficient for uniqueness.
1229 *
1230 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1231 * signatures for ATA and ATAPI devices attached on SerialATA,
1232 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1233 	 * spec never mentioned using different signatures
1234 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1235 * Multiplier specification began to use 0x69/0x96 to identify
1236 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1237 	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
1238 	 * 0x69/0x96 shortly afterwards and described them as reserved for
1239 * SerialATA.
1240 *
1241 * We follow the current spec and consider that 0x69/0x96
1242 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1243 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1244 * SEMB signature. This is worked around in
1245 * ata_dev_read_id().
1246 */
1247 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1248 DPRINTK("found ATA device by sig\n");
1249 return ATA_DEV_ATA;
1250 }
1251
1252 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1253 DPRINTK("found ATAPI device by sig\n");
1254 return ATA_DEV_ATAPI;
1255 }
1256
1257 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1258 DPRINTK("found PMP device by sig\n");
1259 return ATA_DEV_PMP;
1260 }
1261
1262 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1263 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1264 return ATA_DEV_SEMB;
1265 }
1266
1267 DPRINTK("unknown device\n");
1268 return ATA_DEV_UNKNOWN;
1269 }
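/*
 * Example (illustrative sketch): SATA reset paths typically convert the
 * received D2H register FIS back into a taskfile and classify from it
 * (rx_fis being the driver's receive buffer):
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_from_fis(rx_fis, &tf);
 *	class = ata_dev_classify(&tf);
 */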
1270
1271 /**
1272 * ata_id_string - Convert IDENTIFY DEVICE page into string
1273 * @id: IDENTIFY DEVICE results we will examine
1274 * @s: string into which data is output
1275 * @ofs: offset into identify device page
1276 * @len: length of string to return. must be an even number.
1277 *
1278 * The strings in the IDENTIFY DEVICE page are broken up into
1279 * 16-bit chunks. Run through the string, and output each
1280 * 8-bit chunk linearly, regardless of platform.
1281 *
1282 * LOCKING:
1283 * caller.
1284 */
1285
1286 void ata_id_string(const u16 *id, unsigned char *s,
1287 unsigned int ofs, unsigned int len)
1288 {
1289 unsigned int c;
1290
1291 BUG_ON(len & 1);
1292
1293 while (len > 0) {
1294 c = id[ofs] >> 8;
1295 *s = c;
1296 s++;
1297
1298 c = id[ofs] & 0xff;
1299 *s = c;
1300 s++;
1301
1302 ofs++;
1303 len -= 2;
1304 }
1305 }
1306
1307 /**
1308 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1309 * @id: IDENTIFY DEVICE results we will examine
1310 * @s: string into which data is output
1311 * @ofs: offset into identify device page
1312 * @len: length of string to return. must be an odd number.
1313 *
1314 * This function is identical to ata_id_string except that it
1315 * trims trailing spaces and terminates the resulting string with
1316 * null. @len must be actual maximum length (even number) + 1.
1317 *
1318 * LOCKING:
1319 * caller.
1320 */
1321 void ata_id_c_string(const u16 *id, unsigned char *s,
1322 unsigned int ofs, unsigned int len)
1323 {
1324 unsigned char *p;
1325
1326 ata_id_string(id, s, ofs, len - 1);
1327
1328 p = s + strnlen(s, len - 1);
1329 while (p > s && p[-1] == ' ')
1330 p--;
1331 *p = '\0';
1332 }
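/*
 * Example: extracting the model string from IDENTIFY data into a
 * NUL-terminated buffer (the +1 keeps @len odd, as required above):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */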
1333
1334 static u64 ata_id_n_sectors(const u16 *id)
1335 {
1336 if (ata_id_has_lba(id)) {
1337 if (ata_id_has_lba48(id))
1338 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1339 else
1340 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1341 } else {
1342 if (ata_id_current_chs_valid(id))
1343 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1344 id[ATA_ID_CUR_SECTORS];
1345 else
1346 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1347 id[ATA_ID_SECTORS];
1348 }
1349 }
1350
1351 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1352 {
1353 u64 sectors = 0;
1354
1355 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1356 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1357 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1358 sectors |= (tf->lbah & 0xff) << 16;
1359 sectors |= (tf->lbam & 0xff) << 8;
1360 sectors |= (tf->lbal & 0xff);
1361
1362 return sectors;
1363 }
1364
1365 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1366 {
1367 u64 sectors = 0;
1368
1369 sectors |= (tf->device & 0x0f) << 24;
1370 sectors |= (tf->lbah & 0xff) << 16;
1371 sectors |= (tf->lbam & 0xff) << 8;
1372 sectors |= (tf->lbal & 0xff);
1373
1374 return sectors;
1375 }
1376
1377 /**
1378 * ata_read_native_max_address - Read native max address
1379 * @dev: target device
1380 * @max_sectors: out parameter for the result native max address
1381 *
1382 * Perform an LBA48 or LBA28 native size query upon the device in
1383 * question.
1384 *
1385 * RETURNS:
1386 * 0 on success, -EACCES if command is aborted by the drive.
1387 * -EIO on other errors.
1388 */
1389 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1390 {
1391 unsigned int err_mask;
1392 struct ata_taskfile tf;
1393 int lba48 = ata_id_has_lba48(dev->id);
1394
1395 ata_tf_init(dev, &tf);
1396
1397 /* always clear all address registers */
1398 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1399
1400 if (lba48) {
1401 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1402 tf.flags |= ATA_TFLAG_LBA48;
1403 } else
1404 tf.command = ATA_CMD_READ_NATIVE_MAX;
1405
1406 tf.protocol |= ATA_PROT_NODATA;
1407 tf.device |= ATA_LBA;
1408
1409 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1410 if (err_mask) {
1411 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1412 "max address (err_mask=0x%x)\n", err_mask);
1413 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1414 return -EACCES;
1415 return -EIO;
1416 }
1417
1418 if (lba48)
1419 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1420 else
1421 *max_sectors = ata_tf_to_lba(&tf) + 1;
1422 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1423 (*max_sectors)--;
1424 return 0;
1425 }
1426
1427 /**
1428 * ata_set_max_sectors - Set max sectors
1429 * @dev: target device
1430 * @new_sectors: new max sectors value to set for the device
1431 *
1432 * Set max sectors of @dev to @new_sectors.
1433 *
1434 * RETURNS:
1435 * 0 on success, -EACCES if command is aborted or denied (due to
1436 * previous non-volatile SET_MAX) by the drive. -EIO on other
1437 * errors.
1438 */
1439 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1440 {
1441 unsigned int err_mask;
1442 struct ata_taskfile tf;
1443 int lba48 = ata_id_has_lba48(dev->id);
1444
1445 new_sectors--;
1446
1447 ata_tf_init(dev, &tf);
1448
1449 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1450
1451 if (lba48) {
1452 tf.command = ATA_CMD_SET_MAX_EXT;
1453 tf.flags |= ATA_TFLAG_LBA48;
1454
1455 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1456 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1457 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1458 } else {
1459 tf.command = ATA_CMD_SET_MAX;
1460
1461 tf.device |= (new_sectors >> 24) & 0xf;
1462 }
1463
1464 tf.protocol |= ATA_PROT_NODATA;
1465 tf.device |= ATA_LBA;
1466
1467 tf.lbal = (new_sectors >> 0) & 0xff;
1468 tf.lbam = (new_sectors >> 8) & 0xff;
1469 tf.lbah = (new_sectors >> 16) & 0xff;
1470
1471 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1472 if (err_mask) {
1473 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1474 "max address (err_mask=0x%x)\n", err_mask);
1475 if (err_mask == AC_ERR_DEV &&
1476 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1477 return -EACCES;
1478 return -EIO;
1479 }
1480
1481 return 0;
1482 }
1483
1484 /**
1485 * ata_hpa_resize - Resize a device with an HPA set
1486 * @dev: Device to resize
1487 *
1488 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1489 * it if required to the full size of the media. The caller must check
1490 * the drive has the HPA feature set enabled.
1491 *
1492 * RETURNS:
1493 * 0 on success, -errno on failure.
1494 */
1495 static int ata_hpa_resize(struct ata_device *dev)
1496 {
1497 struct ata_eh_context *ehc = &dev->link->eh_context;
1498 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1499 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1500 u64 sectors = ata_id_n_sectors(dev->id);
1501 u64 native_sectors;
1502 int rc;
1503
1504 /* do we need to do it? */
1505 if (dev->class != ATA_DEV_ATA ||
1506 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1507 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1508 return 0;
1509
1510 /* read native max address */
1511 rc = ata_read_native_max_address(dev, &native_sectors);
1512 if (rc) {
1513 /* If device aborted the command or HPA isn't going to
1514 * be unlocked, skip HPA resizing.
1515 */
1516 if (rc == -EACCES || !unlock_hpa) {
1517 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1518 "broken, skipping HPA handling\n");
1519 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1520
1521 /* we can continue if device aborted the command */
1522 if (rc == -EACCES)
1523 rc = 0;
1524 }
1525
1526 return rc;
1527 }
1528 dev->n_native_sectors = native_sectors;
1529
1530 /* nothing to do? */
1531 if (native_sectors <= sectors || !unlock_hpa) {
1532 if (!print_info || native_sectors == sectors)
1533 return 0;
1534
1535 if (native_sectors > sectors)
1536 ata_dev_printk(dev, KERN_INFO,
1537 "HPA detected: current %llu, native %llu\n",
1538 (unsigned long long)sectors,
1539 (unsigned long long)native_sectors);
1540 else if (native_sectors < sectors)
1541 ata_dev_printk(dev, KERN_WARNING,
1542 "native sectors (%llu) is smaller than "
1543 "sectors (%llu)\n",
1544 (unsigned long long)native_sectors,
1545 (unsigned long long)sectors);
1546 return 0;
1547 }
1548
1549 /* let's unlock HPA */
1550 rc = ata_set_max_sectors(dev, native_sectors);
1551 if (rc == -EACCES) {
1552 /* if device aborted the command, skip HPA resizing */
1553 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1554 "(%llu -> %llu), skipping HPA handling\n",
1555 (unsigned long long)sectors,
1556 (unsigned long long)native_sectors);
1557 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1558 return 0;
1559 } else if (rc)
1560 return rc;
1561
1562 /* re-read IDENTIFY data */
1563 rc = ata_dev_reread_id(dev, 0);
1564 if (rc) {
1565 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1566 "data after HPA resizing\n");
1567 return rc;
1568 }
1569
1570 if (print_info) {
1571 u64 new_sectors = ata_id_n_sectors(dev->id);
1572 ata_dev_printk(dev, KERN_INFO,
1573 "HPA unlocked: %llu -> %llu, native %llu\n",
1574 (unsigned long long)sectors,
1575 (unsigned long long)new_sectors,
1576 (unsigned long long)native_sectors);
1577 }
1578
1579 return 0;
1580 }
1581
1582 /**
1583 * ata_dump_id - IDENTIFY DEVICE info debugging output
1584 * @id: IDENTIFY DEVICE page to dump
1585 *
1586 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1587 * page.
1588 *
1589 * LOCKING:
1590 * caller.
1591 */
1592
1593 static inline void ata_dump_id(const u16 *id)
1594 {
1595 DPRINTK("49==0x%04x "
1596 "53==0x%04x "
1597 "63==0x%04x "
1598 "64==0x%04x "
1599 "75==0x%04x \n",
1600 id[49],
1601 id[53],
1602 id[63],
1603 id[64],
1604 id[75]);
1605 DPRINTK("80==0x%04x "
1606 "81==0x%04x "
1607 "82==0x%04x "
1608 "83==0x%04x "
1609 "84==0x%04x \n",
1610 id[80],
1611 id[81],
1612 id[82],
1613 id[83],
1614 id[84]);
1615 DPRINTK("88==0x%04x "
1616 "93==0x%04x\n",
1617 id[88],
1618 id[93]);
1619 }
1620
1621 /**
1622 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1623 * @id: IDENTIFY data to compute xfer mask from
1624 *
1625 * Compute the xfermask for this device. This is not as trivial
1626 * as it seems if we must consider early devices correctly.
1627 *
1628 * FIXME: pre IDE drive timing (do we care ?).
1629 *
1630 * LOCKING:
1631 * None.
1632 *
1633 * RETURNS:
1634 * Computed xfermask
1635 */
1636 unsigned long ata_id_xfermask(const u16 *id)
1637 {
1638 unsigned long pio_mask, mwdma_mask, udma_mask;
1639
1640 /* Usual case. Word 53 indicates word 64 is valid */
1641 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1642 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1643 pio_mask <<= 3;
1644 pio_mask |= 0x7;
1645 } else {
1646 /* If word 64 isn't valid then Word 51 high byte holds
1647 * the PIO timing number for the maximum. Turn it into
1648 * a mask.
1649 */
1650 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1651 if (mode < 5) /* Valid PIO range */
1652 pio_mask = (2 << mode) - 1;
1653 else
1654 pio_mask = 1;
1655
1656 /* But wait.. there's more. Design your standards by
1657 * committee and you too can get a free iordy field to
1658 * process. However its the speeds not the modes that
1659 * are supported... Note drivers using the timing API
1660 * will get this right anyway
1661 */
1662 }
1663
1664 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1665
1666 if (ata_id_is_cfa(id)) {
1667 /*
1668 * Process compact flash extended modes
1669 */
1670 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1671 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1672
1673 if (pio)
1674 pio_mask |= (1 << 5);
1675 if (pio > 1)
1676 pio_mask |= (1 << 6);
1677 if (dma)
1678 mwdma_mask |= (1 << 3);
1679 if (dma > 1)
1680 mwdma_mask |= (1 << 4);
1681 }
1682
1683 udma_mask = 0;
1684 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1685 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1686
1687 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1688 }
1689
1690 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1691 {
1692 struct completion *waiting = qc->private_data;
1693
1694 complete(waiting);
1695 }
1696
1697 /**
1698 * ata_exec_internal_sg - execute libata internal command
1699 * @dev: Device to which the command is sent
1700 * @tf: Taskfile registers for the command and the result
1701 * @cdb: CDB for packet command
1702  *	@dma_dir: Data transfer direction of the command
1703 * @sgl: sg list for the data buffer of the command
1704 * @n_elem: Number of sg entries
1705 * @timeout: Timeout in msecs (0 for default)
1706 *
1707 * Executes libata internal command with timeout. @tf contains
1708 * command on entry and result on return. Timeout and error
1709 * conditions are reported via return value. No recovery action
1710  *	is taken after a command times out. It's the caller's duty to
1711 * clean up after timeout.
1712 *
1713 * LOCKING:
1714 * None. Should be called with kernel context, might sleep.
1715 *
1716 * RETURNS:
1717 * Zero on success, AC_ERR_* mask on failure
1718 */
1719 unsigned ata_exec_internal_sg(struct ata_device *dev,
1720 struct ata_taskfile *tf, const u8 *cdb,
1721 int dma_dir, struct scatterlist *sgl,
1722 unsigned int n_elem, unsigned long timeout)
1723 {
1724 struct ata_link *link = dev->link;
1725 struct ata_port *ap = link->ap;
1726 u8 command = tf->command;
1727 int auto_timeout = 0;
1728 struct ata_queued_cmd *qc;
1729 unsigned int tag, preempted_tag;
1730 u32 preempted_sactive, preempted_qc_active;
1731 int preempted_nr_active_links;
1732 DECLARE_COMPLETION_ONSTACK(wait);
1733 unsigned long flags;
1734 unsigned int err_mask;
1735 int rc;
1736
1737 spin_lock_irqsave(ap->lock, flags);
1738
1739 /* no internal command while frozen */
1740 if (ap->pflags & ATA_PFLAG_FROZEN) {
1741 spin_unlock_irqrestore(ap->lock, flags);
1742 return AC_ERR_SYSTEM;
1743 }
1744
1745 /* initialize internal qc */
1746
1747 /* XXX: Tag 0 is used for drivers with legacy EH as some
1748 * drivers choke if any other tag is given. This breaks
1749 * ata_tag_internal() test for those drivers. Don't use new
1750 * EH stuff without converting to it.
1751 */
1752 if (ap->ops->error_handler)
1753 tag = ATA_TAG_INTERNAL;
1754 else
1755 tag = 0;
1756
1757 if (test_and_set_bit(tag, &ap->qc_allocated))
1758 BUG();
1759 qc = __ata_qc_from_tag(ap, tag);
1760
1761 qc->tag = tag;
1762 qc->scsicmd = NULL;
1763 qc->ap = ap;
1764 qc->dev = dev;
1765 ata_qc_reinit(qc);
1766
1767 preempted_tag = link->active_tag;
1768 preempted_sactive = link->sactive;
1769 preempted_qc_active = ap->qc_active;
1770 preempted_nr_active_links = ap->nr_active_links;
1771 link->active_tag = ATA_TAG_POISON;
1772 link->sactive = 0;
1773 ap->qc_active = 0;
1774 ap->nr_active_links = 0;
1775
1776 /* prepare & issue qc */
1777 qc->tf = *tf;
1778 if (cdb)
1779 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1780 qc->flags |= ATA_QCFLAG_RESULT_TF;
1781 qc->dma_dir = dma_dir;
1782 if (dma_dir != DMA_NONE) {
1783 unsigned int i, buflen = 0;
1784 struct scatterlist *sg;
1785
1786 for_each_sg(sgl, sg, n_elem, i)
1787 buflen += sg->length;
1788
1789 ata_sg_init(qc, sgl, n_elem);
1790 qc->nbytes = buflen;
1791 }
1792
1793 qc->private_data = &wait;
1794 qc->complete_fn = ata_qc_complete_internal;
1795
1796 ata_qc_issue(qc);
1797
1798 spin_unlock_irqrestore(ap->lock, flags);
1799
1800 if (!timeout) {
1801 if (ata_probe_timeout)
1802 timeout = ata_probe_timeout * 1000;
1803 else {
1804 timeout = ata_internal_cmd_timeout(dev, command);
1805 auto_timeout = 1;
1806 }
1807 }
1808
1809 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1810
1811 ata_sff_flush_pio_task(ap);
1812
1813 if (!rc) {
1814 spin_lock_irqsave(ap->lock, flags);
1815
1816 /* We're racing with irq here. If we lose, the
1817 * following test prevents us from completing the qc
1818 * twice. If we win, the port is frozen and will be
1819 * cleaned up by ->post_internal_cmd().
1820 */
1821 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1822 qc->err_mask |= AC_ERR_TIMEOUT;
1823
1824 if (ap->ops->error_handler)
1825 ata_port_freeze(ap);
1826 else
1827 ata_qc_complete(qc);
1828
1829 if (ata_msg_warn(ap))
1830 ata_dev_printk(dev, KERN_WARNING,
1831 "qc timeout (cmd 0x%x)\n", command);
1832 }
1833
1834 spin_unlock_irqrestore(ap->lock, flags);
1835 }
1836
1837 /* do post_internal_cmd */
1838 if (ap->ops->post_internal_cmd)
1839 ap->ops->post_internal_cmd(qc);
1840
1841 /* perform minimal error analysis */
1842 if (qc->flags & ATA_QCFLAG_FAILED) {
1843 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1844 qc->err_mask |= AC_ERR_DEV;
1845
1846 if (!qc->err_mask)
1847 qc->err_mask |= AC_ERR_OTHER;
1848
1849 if (qc->err_mask & ~AC_ERR_OTHER)
1850 qc->err_mask &= ~AC_ERR_OTHER;
1851 }
1852
1853 /* finish up */
1854 spin_lock_irqsave(ap->lock, flags);
1855
1856 *tf = qc->result_tf;
1857 err_mask = qc->err_mask;
1858
1859 ata_qc_free(qc);
1860 link->active_tag = preempted_tag;
1861 link->sactive = preempted_sactive;
1862 ap->qc_active = preempted_qc_active;
1863 ap->nr_active_links = preempted_nr_active_links;
1864
1865 spin_unlock_irqrestore(ap->lock, flags);
1866
1867 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1868 ata_internal_cmd_timed_out(dev, command);
1869
1870 return err_mask;
1871 }
1872
1873 /**
1874 * ata_exec_internal - execute libata internal command
1875 * @dev: Device to which the command is sent
1876 * @tf: Taskfile registers for the command and the result
1877 * @cdb: CDB for packet command
1878  *	@dma_dir: Data transfer direction of the command
1879 * @buf: Data buffer of the command
1880 * @buflen: Length of data buffer
1881 * @timeout: Timeout in msecs (0 for default)
1882 *
1883 * Wrapper around ata_exec_internal_sg() which takes simple
1884 * buffer instead of sg list.
1885 *
1886 * LOCKING:
1887 * None. Should be called with kernel context, might sleep.
1888 *
1889 * RETURNS:
1890 * Zero on success, AC_ERR_* mask on failure
1891 */
1892 unsigned ata_exec_internal(struct ata_device *dev,
1893 struct ata_taskfile *tf, const u8 *cdb,
1894 int dma_dir, void *buf, unsigned int buflen,
1895 unsigned long timeout)
1896 {
1897 struct scatterlist *psg = NULL, sg;
1898 unsigned int n_elem = 0;
1899
1900 if (dma_dir != DMA_NONE) {
1901 WARN_ON(!buf);
1902 sg_init_one(&sg, buf, buflen);
1903 psg = &sg;
1904 n_elem++;
1905 }
1906
1907 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1908 timeout);
1909 }
1910
1911 /**
1912 * ata_do_simple_cmd - execute simple internal command
1913 * @dev: Device to which the command is sent
1914 * @cmd: Opcode to execute
1915 *
1916  *	Execute a 'simple' command that consists only of the opcode
1917  *	@cmd itself, without filling any other registers.
1918 *
1919 * LOCKING:
1920 * Kernel thread context (may sleep).
1921 *
1922 * RETURNS:
1923 * Zero on success, AC_ERR_* mask on failure
1924 */
1925 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1926 {
1927 struct ata_taskfile tf;
1928
1929 ata_tf_init(dev, &tf);
1930
1931 tf.command = cmd;
1932 tf.flags |= ATA_TFLAG_DEVICE;
1933 tf.protocol = ATA_PROT_NODATA;
1934
1935 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1936 }
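/*
 * Example (illustrative sketch): spinning a drive down with a single
 * opcode and reporting any failure:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING,
 *			       "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
 *			       err_mask);
 */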
1937
1938 /**
1939 * ata_pio_need_iordy - check if iordy needed
1940 * @adev: ATA device
1941 *
1942 * Check if the current speed of the device requires IORDY. Used
1943 * by various controllers for chip configuration.
1944 */
1945 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1946 {
1947 /* Don't set IORDY if we're preparing for reset. IORDY may
1948 * lead to controller lock up on certain controllers if the
1949 * port is not occupied. See bko#11703 for details.
1950 */
1951 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1952 return 0;
1953 /* Controller doesn't support IORDY. Probably a pointless
1954 * check as the caller should know this.
1955 */
1956 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1957 return 0;
1958 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1959 if (ata_id_is_cfa(adev->id)
1960 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1961 return 0;
1962 /* PIO3 and higher it is mandatory */
1963 if (adev->pio_mode > XFER_PIO_2)
1964 return 1;
1965 /* We turn it on when possible */
1966 if (ata_id_has_iordy(adev->id))
1967 return 1;
1968 return 0;
1969 }
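/*
 * Example (illustrative sketch): a PATA controller driver would consult
 * this from its ->set_piomode() hook when programming chip timings; the
 * register and bit below are hypothetical:
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing_reg |= MY_CHIP_IORDY_EN;
 */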
1970
1971 /**
1972 * ata_pio_mask_no_iordy - Return the non IORDY mask
1973 * @adev: ATA device
1974 *
1975 * Compute the highest mode possible if we are not using iordy. Return
1976 * -1 if no iordy mode is available.
1977 */
1978 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1979 {
1980 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1981 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1982 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1983 /* Is the speed faster than the drive allows non IORDY ? */
1984 if (pio) {
1985 /* This is cycle times not frequency - watch the logic! */
1986 if (pio > 240) /* PIO2 is 240nS per cycle */
1987 return 3 << ATA_SHIFT_PIO;
1988 return 7 << ATA_SHIFT_PIO;
1989 }
1990 }
1991 return 3 << ATA_SHIFT_PIO;
1992 }
1993
1994 /**
1995 * ata_do_dev_read_id - default ID read method
1996 * @dev: device
1997 * @tf: proposed taskfile
1998 * @id: data buffer
1999 *
2000 * Issue the identify taskfile and hand back the buffer containing
2001 * identify data. For some RAID controllers and for pre-ATA devices
2002 * this function is wrapped or replaced by the driver.
2003 */
2004 unsigned int ata_do_dev_read_id(struct ata_device *dev,
2005 struct ata_taskfile *tf, u16 *id)
2006 {
2007 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
2008 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2009 }
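
/*
 * Example (illustrative, hypothetical driver): controllers that mangle or
 * cache IDENTIFY data can supply their own ->read_id() and still reuse
 * this default for the actual transfer, fixing the buffer up afterwards.
 * The word-47 fixup below is purely hypothetical.
 *
 *	static unsigned int example_read_id(struct ata_device *dev,
 *					    struct ata_taskfile *tf, u16 *id)
 *	{
 *		unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);
 *
 *		if (!err_mask)
 *			id[ATA_ID_MAX_MULTSECT] &= ~0xff;
 *		return err_mask;
 *	}
 */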
2010
2011 /**
2012 * ata_dev_read_id - Read ID data from the specified device
2013 * @dev: target device
2014 * @p_class: pointer to class of the target device (may be changed)
2015 * @flags: ATA_READID_* flags
2016 * @id: buffer to read IDENTIFY data into
2017 *
2018 * Read ID data from the specified device. ATA_CMD_ID_ATA is
2019 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2020 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
2021 * for pre-ATA4 drives.
2022 *
2023 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2024 * now we abort if we hit that case.
2025 *
2026 * LOCKING:
2027 * Kernel thread context (may sleep)
2028 *
2029 * RETURNS:
2030 * 0 on success, -errno otherwise.
2031 */
2032 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2033 unsigned int flags, u16 *id)
2034 {
2035 struct ata_port *ap = dev->link->ap;
2036 unsigned int class = *p_class;
2037 struct ata_taskfile tf;
2038 unsigned int err_mask = 0;
2039 const char *reason;
2040 bool is_semb = class == ATA_DEV_SEMB;
2041 int may_fallback = 1, tried_spinup = 0;
2042 int rc;
2043
2044 if (ata_msg_ctl(ap))
2045 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2046
2047 retry:
2048 ata_tf_init(dev, &tf);
2049
2050 switch (class) {
2051 case ATA_DEV_SEMB:
2052 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
2053 case ATA_DEV_ATA:
2054 tf.command = ATA_CMD_ID_ATA;
2055 break;
2056 case ATA_DEV_ATAPI:
2057 tf.command = ATA_CMD_ID_ATAPI;
2058 break;
2059 default:
2060 rc = -ENODEV;
2061 reason = "unsupported class";
2062 goto err_out;
2063 }
2064
2065 tf.protocol = ATA_PROT_PIO;
2066
2067 /* Some devices choke if TF registers contain garbage. Make
2068 * sure those are properly initialized.
2069 */
2070 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2071
2072 /* Device presence detection is unreliable on some
2073 * controllers. Always poll IDENTIFY if available.
2074 */
2075 tf.flags |= ATA_TFLAG_POLLING;
2076
2077 if (ap->ops->read_id)
2078 err_mask = ap->ops->read_id(dev, &tf, id);
2079 else
2080 err_mask = ata_do_dev_read_id(dev, &tf, id);
2081
2082 if (err_mask) {
2083 if (err_mask & AC_ERR_NODEV_HINT) {
2084 ata_dev_printk(dev, KERN_DEBUG,
2085 "NODEV after polling detection\n");
2086 return -ENOENT;
2087 }
2088
2089 if (is_semb) {
2090 ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
2091 "device w/ SEMB sig, disabled\n");
2092 /* SEMB is not supported yet */
2093 *p_class = ATA_DEV_SEMB_UNSUP;
2094 return 0;
2095 }
2096
2097 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2098 /* Device or controller might have reported
2099 * the wrong device class. Give a shot at the
2100 * other IDENTIFY if the current one is
2101 * aborted by the device.
2102 */
2103 if (may_fallback) {
2104 may_fallback = 0;
2105
2106 if (class == ATA_DEV_ATA)
2107 class = ATA_DEV_ATAPI;
2108 else
2109 class = ATA_DEV_ATA;
2110 goto retry;
2111 }
2112
2113 /* Control reaches here iff the device aborted
2114 * both flavors of IDENTIFYs which happens
2115 * sometimes with phantom devices.
2116 */
2117 ata_dev_printk(dev, KERN_DEBUG,
2118 "both IDENTIFYs aborted, assuming NODEV\n");
2119 return -ENOENT;
2120 }
2121
2122 rc = -EIO;
2123 reason = "I/O error";
2124 goto err_out;
2125 }
2126
2127 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
2128 ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
2129 "class=%d may_fallback=%d tried_spinup=%d\n",
2130 class, may_fallback, tried_spinup);
2131 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
2132 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
2133 }
2134
2135 /* Falling back doesn't make sense if ID data was read
2136 * successfully at least once.
2137 */
2138 may_fallback = 0;
2139
2140 swap_buf_le16(id, ATA_ID_WORDS);
2141
2142 /* sanity check */
2143 rc = -EINVAL;
2144 reason = "device reports invalid type";
2145
2146 if (class == ATA_DEV_ATA) {
2147 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2148 goto err_out;
2149 } else {
2150 if (ata_id_is_ata(id))
2151 goto err_out;
2152 }
2153
2154 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2155 tried_spinup = 1;
2156 /*
2157 * Drive powered-up in standby mode, and requires a specific
2158 * SET_FEATURES spin-up subcommand before it will accept
2159 * anything other than the original IDENTIFY command.
2160 */
2161 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2162 if (err_mask && id[2] != 0x738c) {
2163 rc = -EIO;
2164 reason = "SPINUP failed";
2165 goto err_out;
2166 }
2167 /*
2168 * If the drive initially returned incomplete IDENTIFY info,
2169 * we now must reissue the IDENTIFY command.
2170 */
2171 if (id[2] == 0x37c8)
2172 goto retry;
2173 }
2174
2175 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2176 /*
2177 * The exact sequence expected by certain pre-ATA4 drives is:
2178 * SRST RESET
2179 * IDENTIFY (optional in early ATA)
2180 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2181 * anything else..
2182 * Some drives were very specific about that exact sequence.
2183 *
2184 * Note that ATA4 says lba is mandatory so the second check
2185 * should never trigger.
2186 */
2187 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2188 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2189 if (err_mask) {
2190 rc = -EIO;
2191 reason = "INIT_DEV_PARAMS failed";
2192 goto err_out;
2193 }
2194
2195 /* current CHS translation info (id[53-58]) might be
2196 * changed. reread the identify device info.
2197 */
2198 flags &= ~ATA_READID_POSTRESET;
2199 goto retry;
2200 }
2201 }
2202
2203 *p_class = class;
2204
2205 return 0;
2206
2207 err_out:
2208 if (ata_msg_warn(ap))
2209 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2210 "(%s, err_mask=0x%x)\n", reason, err_mask);
2211 return rc;
2212 }
2213
2214 static int ata_do_link_spd_horkage(struct ata_device *dev)
2215 {
2216 struct ata_link *plink = ata_dev_phys_link(dev);
2217 u32 target, target_limit;
2218
2219 if (!sata_scr_valid(plink))
2220 return 0;
2221
2222 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2223 target = 1;
2224 else
2225 return 0;
2226
2227 target_limit = (1 << target) - 1;
2228
2229 /* if already on stricter limit, no need to push further */
2230 if (plink->sata_spd_limit <= target_limit)
2231 return 0;
2232
2233 plink->sata_spd_limit = target_limit;
2234
2235 /* Request another EH round by returning -EAGAIN if link is
2236 * going faster than the target speed. Forward progress is
2237 * guaranteed by setting sata_spd_limit to target_limit above.
2238 */
2239 if (plink->sata_spd > target) {
2240 ata_dev_printk(dev, KERN_INFO,
2241 "applying link speed limit horkage to %s\n",
2242 sata_spd_string(target));
2243 return -EAGAIN;
2244 }
2245 return 0;
2246 }
2247
2248 static inline u8 ata_dev_knobble(struct ata_device *dev)
2249 {
2250 struct ata_port *ap = dev->link->ap;
2251
2252 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2253 return 0;
2254
2255 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2256 }
2257
2258 static int ata_dev_config_ncq(struct ata_device *dev,
2259 char *desc, size_t desc_sz)
2260 {
2261 struct ata_port *ap = dev->link->ap;
2262 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2263 unsigned int err_mask;
2264 char *aa_desc = "";
2265
2266 if (!ata_id_has_ncq(dev->id)) {
2267 desc[0] = '\0';
2268 return 0;
2269 }
2270 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2271 snprintf(desc, desc_sz, "NCQ (not used)");
2272 return 0;
2273 }
2274 if (ap->flags & ATA_FLAG_NCQ) {
2275 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2276 dev->flags |= ATA_DFLAG_NCQ;
2277 }
2278
2279 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2280 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2281 ata_id_has_fpdma_aa(dev->id)) {
2282 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2283 SATA_FPDMA_AA);
2284 if (err_mask) {
2285 ata_dev_printk(dev, KERN_ERR, "failed to enable AA "
2286 "(error_mask=0x%x)\n", err_mask);
2287 if (err_mask != AC_ERR_DEV) {
2288 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2289 return -EIO;
2290 }
2291 } else
2292 aa_desc = ", AA";
2293 }
2294
2295 if (hdepth >= ddepth)
2296 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2297 else
2298 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2299 ddepth, aa_desc);
2300 return 0;
2301 }
2302
2303 /**
2304 * ata_dev_configure - Configure the specified ATA/ATAPI device
2305 * @dev: Target device to configure
2306 *
2307 * Configure @dev according to @dev->id. Generic and low-level
2308 * driver specific fixups are also applied.
2309 *
2310 * LOCKING:
2311 * Kernel thread context (may sleep)
2312 *
2313 * RETURNS:
2314 * 0 on success, -errno otherwise
2315 */
2316 int ata_dev_configure(struct ata_device *dev)
2317 {
2318 struct ata_port *ap = dev->link->ap;
2319 struct ata_eh_context *ehc = &dev->link->eh_context;
2320 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2321 const u16 *id = dev->id;
2322 unsigned long xfer_mask;
2323 char revbuf[7]; /* XYZ-99\0 */
2324 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2325 char modelbuf[ATA_ID_PROD_LEN+1];
2326 int rc;
2327
2328 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2329 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2330 __func__);
2331 return 0;
2332 }
2333
2334 if (ata_msg_probe(ap))
2335 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2336
2337 /* set horkage */
2338 dev->horkage |= ata_dev_blacklisted(dev);
2339 ata_force_horkage(dev);
2340
2341 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2342 ata_dev_printk(dev, KERN_INFO,
2343 "unsupported device, disabling\n");
2344 ata_dev_disable(dev);
2345 return 0;
2346 }
2347
2348 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2349 dev->class == ATA_DEV_ATAPI) {
2350 ata_dev_printk(dev, KERN_WARNING,
2351 "WARNING: ATAPI is %s, device ignored.\n",
2352 atapi_enabled ? "not supported with this driver"
2353 : "disabled");
2354 ata_dev_disable(dev);
2355 return 0;
2356 }
2357
2358 rc = ata_do_link_spd_horkage(dev);
2359 if (rc)
2360 return rc;
2361
2362 /* let ACPI work its magic */
2363 rc = ata_acpi_on_devcfg(dev);
2364 if (rc)
2365 return rc;
2366
2367 /* massage HPA, do it early as it might change IDENTIFY data */
2368 rc = ata_hpa_resize(dev);
2369 if (rc)
2370 return rc;
2371
2372 /* print device capabilities */
2373 if (ata_msg_probe(ap))
2374 ata_dev_printk(dev, KERN_DEBUG,
2375 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2376 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2377 __func__,
2378 id[49], id[82], id[83], id[84],
2379 id[85], id[86], id[87], id[88]);
2380
2381 /* initialize to-be-configured parameters */
2382 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2383 dev->max_sectors = 0;
2384 dev->cdb_len = 0;
2385 dev->n_sectors = 0;
2386 dev->cylinders = 0;
2387 dev->heads = 0;
2388 dev->sectors = 0;
2389 dev->multi_count = 0;
2390
2391 /*
2392 * common ATA, ATAPI feature tests
2393 */
2394
2395 /* find max transfer mode; for printk only */
2396 xfer_mask = ata_id_xfermask(id);
2397
2398 if (ata_msg_probe(ap))
2399 ata_dump_id(id);
2400
2401 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2402 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2403 sizeof(fwrevbuf));
2404
2405 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2406 sizeof(modelbuf));
2407
2408 /* ATA-specific feature tests */
2409 if (dev->class == ATA_DEV_ATA) {
2410 if (ata_id_is_cfa(id)) {
2411 /* CPRM may make this media unusable */
2412 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2413 ata_dev_printk(dev, KERN_WARNING,
2414 "supports DRM functions and may "
2415 "not be fully accessable.\n");
2416 snprintf(revbuf, 7, "CFA");
2417 } else {
2418 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2419 /* Warn the user if the device has TPM extensions */
2420 if (ata_id_has_tpm(id))
2421 ata_dev_printk(dev, KERN_WARNING,
2422 "supports DRM functions and may "
2423 "not be fully accessable.\n");
2424 }
2425
2426 dev->n_sectors = ata_id_n_sectors(id);
2427
2428 /* get current R/W Multiple count setting */
2429 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2430 unsigned int max = dev->id[47] & 0xff;
2431 unsigned int cnt = dev->id[59] & 0xff;
2432 /* only recognize/allow powers of two here */
2433 if (is_power_of_2(max) && is_power_of_2(cnt))
2434 if (cnt <= max)
2435 dev->multi_count = cnt;
2436 }
2437
2438 if (ata_id_has_lba(id)) {
2439 const char *lba_desc;
2440 char ncq_desc[24];
2441
2442 lba_desc = "LBA";
2443 dev->flags |= ATA_DFLAG_LBA;
2444 if (ata_id_has_lba48(id)) {
2445 dev->flags |= ATA_DFLAG_LBA48;
2446 lba_desc = "LBA48";
2447
2448 if (dev->n_sectors >= (1UL << 28) &&
2449 ata_id_has_flush_ext(id))
2450 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2451 }
2452
2453 /* config NCQ */
2454 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2455 if (rc)
2456 return rc;
2457
2458 /* print device info to dmesg */
2459 if (ata_msg_drv(ap) && print_info) {
2460 ata_dev_printk(dev, KERN_INFO,
2461 "%s: %s, %s, max %s\n",
2462 revbuf, modelbuf, fwrevbuf,
2463 ata_mode_string(xfer_mask));
2464 ata_dev_printk(dev, KERN_INFO,
2465 "%Lu sectors, multi %u: %s %s\n",
2466 (unsigned long long)dev->n_sectors,
2467 dev->multi_count, lba_desc, ncq_desc);
2468 }
2469 } else {
2470 /* CHS */
2471
2472 /* Default translation */
2473 dev->cylinders = id[1];
2474 dev->heads = id[3];
2475 dev->sectors = id[6];
2476
2477 if (ata_id_current_chs_valid(id)) {
2478 /* Current CHS translation is valid. */
2479 dev->cylinders = id[54];
2480 dev->heads = id[55];
2481 dev->sectors = id[56];
2482 }
2483
2484 /* print device info to dmesg */
2485 if (ata_msg_drv(ap) && print_info) {
2486 ata_dev_printk(dev, KERN_INFO,
2487 "%s: %s, %s, max %s\n",
2488 revbuf, modelbuf, fwrevbuf,
2489 ata_mode_string(xfer_mask));
2490 ata_dev_printk(dev, KERN_INFO,
2491 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2492 (unsigned long long)dev->n_sectors,
2493 dev->multi_count, dev->cylinders,
2494 dev->heads, dev->sectors);
2495 }
2496 }
2497
2498 dev->cdb_len = 16;
2499 }
2500
2501 /* ATAPI-specific feature tests */
2502 else if (dev->class == ATA_DEV_ATAPI) {
2503 const char *cdb_intr_string = "";
2504 const char *atapi_an_string = "";
2505 const char *dma_dir_string = "";
2506 u32 sntf;
2507
2508 rc = atapi_cdb_len(id);
2509 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2510 if (ata_msg_warn(ap))
2511 ata_dev_printk(dev, KERN_WARNING,
2512 "unsupported CDB len\n");
2513 rc = -EINVAL;
2514 goto err_out_nosup;
2515 }
2516 dev->cdb_len = (unsigned int) rc;
2517
2518 /* Enable ATAPI AN if both the host and device have
2519 * the support. If PMP is attached, SNTF is required
2520 * to enable ATAPI AN to discern between PHY status
2521 * changed notifications and ATAPI ANs.
2522 */
2523 if (atapi_an &&
2524 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2525 (!sata_pmp_attached(ap) ||
2526 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2527 unsigned int err_mask;
2528
2529 /* issue SET feature command to turn this on */
2530 err_mask = ata_dev_set_feature(dev,
2531 SETFEATURES_SATA_ENABLE, SATA_AN);
2532 if (err_mask)
2533 ata_dev_printk(dev, KERN_ERR,
2534 "failed to enable ATAPI AN "
2535 "(err_mask=0x%x)\n", err_mask);
2536 else {
2537 dev->flags |= ATA_DFLAG_AN;
2538 atapi_an_string = ", ATAPI AN";
2539 }
2540 }
2541
2542 if (ata_id_cdb_intr(dev->id)) {
2543 dev->flags |= ATA_DFLAG_CDB_INTR;
2544 cdb_intr_string = ", CDB intr";
2545 }
2546
2547 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2548 dev->flags |= ATA_DFLAG_DMADIR;
2549 dma_dir_string = ", DMADIR";
2550 }
2551
2552 /* print device info to dmesg */
2553 if (ata_msg_drv(ap) && print_info)
2554 ata_dev_printk(dev, KERN_INFO,
2555 "ATAPI: %s, %s, max %s%s%s%s\n",
2556 modelbuf, fwrevbuf,
2557 ata_mode_string(xfer_mask),
2558 cdb_intr_string, atapi_an_string,
2559 dma_dir_string);
2560 }
2561
2562 /* determine max_sectors */
2563 dev->max_sectors = ATA_MAX_SECTORS;
2564 if (dev->flags & ATA_DFLAG_LBA48)
2565 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2566
2567 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2568 if (ata_id_has_hipm(dev->id))
2569 dev->flags |= ATA_DFLAG_HIPM;
2570 if (ata_id_has_dipm(dev->id))
2571 dev->flags |= ATA_DFLAG_DIPM;
2572 }
2573
2574 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2575 200 sectors */
2576 if (ata_dev_knobble(dev)) {
2577 if (ata_msg_drv(ap) && print_info)
2578 ata_dev_printk(dev, KERN_INFO,
2579 "applying bridge limits\n");
2580 dev->udma_mask &= ATA_UDMA5;
2581 dev->max_sectors = ATA_MAX_SECTORS;
2582 }
2583
2584 if ((dev->class == ATA_DEV_ATAPI) &&
2585 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2586 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2587 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2588 }
2589
2590 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2591 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2592 dev->max_sectors);
2593
2594 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2595 dev->horkage |= ATA_HORKAGE_IPM;
2596
2597 /* reset link pm_policy for this port to no pm */
2598 ap->pm_policy = MAX_PERFORMANCE;
2599 }
2600
2601 if (ap->ops->dev_config)
2602 ap->ops->dev_config(dev);
2603
2604 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2605 /* Let the user know. We don't want to disallow opens for
2606 rescue purposes, or in case the vendor is just a blithering
2607 idiot. Do this after the dev_config call as some controllers
2608 with buggy firmware may want to avoid reporting false device
2609 bugs */
2610
2611 if (print_info) {
2612 ata_dev_printk(dev, KERN_WARNING,
2613 "Drive reports diagnostics failure. This may indicate a drive\n");
2614 ata_dev_printk(dev, KERN_WARNING,
2615 "fault or invalid emulation. Contact drive vendor for information.\n");
2616 }
2617 }
2618
2619 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2620 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2621 "firmware update to be fully functional.\n");
2622 ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
2623 "or visit http://ata.wiki.kernel.org.\n");
2624 }
2625
2626 return 0;
2627
2628 err_out_nosup:
2629 if (ata_msg_probe(ap))
2630 ata_dev_printk(dev, KERN_DEBUG,
2631 "%s: EXIT, err\n", __func__);
2632 return rc;
2633 }
2634
2635 /**
2636 * ata_cable_40wire - return 40 wire cable type
2637 * @ap: port
2638 *
2639 * Helper method for drivers which want to hardwire 40 wire cable
2640 * detection.
2641 */
2642
2643 int ata_cable_40wire(struct ata_port *ap)
2644 {
2645 return ATA_CBL_PATA40;
2646 }
2647
2648 /**
2649 * ata_cable_80wire - return 80 wire cable type
2650 * @ap: port
2651 *
2652 * Helper method for drivers which want to hardwire 80 wire cable
2653 * detection.
2654 */
2655
2656 int ata_cable_80wire(struct ata_port *ap)
2657 {
2658 return ATA_CBL_PATA80;
2659 }
2660
2661 /**
2662 * ata_cable_unknown - return unknown PATA cable.
2663 * @ap: port
2664 *
2665 * Helper method for drivers which have no PATA cable detection.
2666 */
2667
2668 int ata_cable_unknown(struct ata_port *ap)
2669 {
2670 return ATA_CBL_PATA_UNK;
2671 }
2672
2673 /**
2674 * ata_cable_ignore - return ignored PATA cable.
2675 * @ap: port
2676 *
2677 * Helper method for drivers which don't use cable type to limit
2678 * transfer mode.
2679 */
2680 int ata_cable_ignore(struct ata_port *ap)
2681 {
2682 return ATA_CBL_PATA_IGN;
2683 }
2684
2685 /**
2686 * ata_cable_sata - return SATA cable type
2687 * @ap: port
2688 *
2689 * Helper method for drivers which have SATA cables
2690 */
2691
2692 int ata_cable_sata(struct ata_port *ap)
2693 {
2694 return ATA_CBL_SATA;
2695 }
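
/*
 * Example (illustrative): these cable helpers exist so an LLD can plug one
 * of them straight into its port operations when no real detection logic
 * is needed.  &ata_bmdma_port_ops and example_set_piomode below are
 * placeholders for whatever the driver actually provides.
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= example_set_piomode,
 *	};
 */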
2696
2697 /**
2698 * ata_bus_probe - Reset and probe ATA bus
2699 * @ap: Bus to probe
2700 *
2701 * Master ATA bus probing function. Initiates a hardware-dependent
2702 * bus reset, then attempts to identify any devices found on
2703 * the bus.
2704 *
2705 * LOCKING:
2706 * PCI/etc. bus probe sem.
2707 *
2708 * RETURNS:
2709 * Zero on success, negative errno otherwise.
2710 */
2711
2712 int ata_bus_probe(struct ata_port *ap)
2713 {
2714 unsigned int classes[ATA_MAX_DEVICES];
2715 int tries[ATA_MAX_DEVICES];
2716 int rc;
2717 struct ata_device *dev;
2718
2719 ata_for_each_dev(dev, &ap->link, ALL)
2720 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2721
2722 retry:
2723 ata_for_each_dev(dev, &ap->link, ALL) {
2724 /* If we issue an SRST then an ATA drive (not ATAPI)
2725 * may change configuration and be in PIO0 timing. If
2726 * we do a hard reset (or are coming from power on)
2727 * this is true for ATA or ATAPI. Until we've set a
2728 * suitable controller mode we should not touch the
2729 * bus as we may be talking too fast.
2730 */
2731 dev->pio_mode = XFER_PIO_0;
2732
2733 /* If the controller has a pio mode setup function
2734 * then use it to set the chipset to rights. Don't
2735 * touch the DMA setup as that will be dealt with when
2736 * configuring devices.
2737 */
2738 if (ap->ops->set_piomode)
2739 ap->ops->set_piomode(ap, dev);
2740 }
2741
2742 /* reset and determine device classes */
2743 ap->ops->phy_reset(ap);
2744
2745 ata_for_each_dev(dev, &ap->link, ALL) {
2746 if (dev->class != ATA_DEV_UNKNOWN)
2747 classes[dev->devno] = dev->class;
2748 else
2749 classes[dev->devno] = ATA_DEV_NONE;
2750
2751 dev->class = ATA_DEV_UNKNOWN;
2752 }
2753
2754 /* read IDENTIFY page and configure devices. We have to do the identify
2755 specific sequence bass-ackwards so that PDIAG- is released by
2756 the slave device */
2757
2758 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2759 if (tries[dev->devno])
2760 dev->class = classes[dev->devno];
2761
2762 if (!ata_dev_enabled(dev))
2763 continue;
2764
2765 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2766 dev->id);
2767 if (rc)
2768 goto fail;
2769 }
2770
2771 /* Now ask for the cable type as PDIAG- should have been released */
2772 if (ap->ops->cable_detect)
2773 ap->cbl = ap->ops->cable_detect(ap);
2774
2775 /* We may have SATA bridge glue hiding here irrespective of
2776 * the reported cable types and sensed types. When SATA
2777 * drives indicate we have a bridge, we don't know which end
2778 * of the link the bridge is which is a problem.
2779 */
2780 ata_for_each_dev(dev, &ap->link, ENABLED)
2781 if (ata_id_is_sata(dev->id))
2782 ap->cbl = ATA_CBL_SATA;
2783
2784 /* After the identify sequence we can now set up the devices. We do
2785 this in the normal order so that the user doesn't get confused */
2786
2787 ata_for_each_dev(dev, &ap->link, ENABLED) {
2788 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2789 rc = ata_dev_configure(dev);
2790 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2791 if (rc)
2792 goto fail;
2793 }
2794
2795 /* configure transfer mode */
2796 rc = ata_set_mode(&ap->link, &dev);
2797 if (rc)
2798 goto fail;
2799
2800 ata_for_each_dev(dev, &ap->link, ENABLED)
2801 return 0;
2802
2803 return -ENODEV;
2804
2805 fail:
2806 tries[dev->devno]--;
2807
2808 switch (rc) {
2809 case -EINVAL:
2810 /* eeek, something went very wrong, give up */
2811 tries[dev->devno] = 0;
2812 break;
2813
2814 case -ENODEV:
2815 /* give it just one more chance */
2816 tries[dev->devno] = min(tries[dev->devno], 1);
2817 case -EIO:
2818 if (tries[dev->devno] == 1) {
2819 /* This is the last chance, better to slow
2820 * down than lose it.
2821 */
2822 sata_down_spd_limit(&ap->link, 0);
2823 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2824 }
2825 }
2826
2827 if (!tries[dev->devno])
2828 ata_dev_disable(dev);
2829
2830 goto retry;
2831 }
2832
2833 /**
2834 * sata_print_link_status - Print SATA link status
2835 * @link: SATA link to printk link status about
2836 *
2837 * This function prints link speed and status of a SATA link.
2838 *
2839 * LOCKING:
2840 * None.
2841 */
2842 static void sata_print_link_status(struct ata_link *link)
2843 {
2844 u32 sstatus, scontrol, tmp;
2845
2846 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2847 return;
2848 sata_scr_read(link, SCR_CONTROL, &scontrol);
2849
2850 if (ata_phys_link_online(link)) {
2851 tmp = (sstatus >> 4) & 0xf;
2852 ata_link_printk(link, KERN_INFO,
2853 "SATA link up %s (SStatus %X SControl %X)\n",
2854 sata_spd_string(tmp), sstatus, scontrol);
2855 } else {
2856 ata_link_printk(link, KERN_INFO,
2857 "SATA link down (SStatus %X SControl %X)\n",
2858 sstatus, scontrol);
2859 }
2860 }
2861
2862 /**
2863 * ata_dev_pair - return other device on cable
2864 * @adev: device
2865 *
2866 * Obtain the other device on the same cable, or if none is
2867 * present NULL is returned
2868 */
2869
2870 struct ata_device *ata_dev_pair(struct ata_device *adev)
2871 {
2872 struct ata_link *link = adev->link;
2873 struct ata_device *pair = &link->device[1 - adev->devno];
2874 if (!ata_dev_enabled(pair))
2875 return NULL;
2876 return pair;
2877 }
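
/*
 * Example (illustrative): PATA drivers whose timing registers are shared
 * per cable often look up the companion device so the slower of the two
 * modes can be programmed:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	u8 mode = adev->pio_mode;
 *
 *	if (pair && pair->pio_mode < mode)
 *		mode = pair->pio_mode;	// clamp to the slower device
 */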
2878
2879 /**
2880 * sata_down_spd_limit - adjust SATA spd limit downward
2881 * @link: Link to adjust SATA spd limit for
2882 * @spd_limit: Additional limit
2883 *
2884 * Adjust SATA spd limit of @link downward. Note that this
2885 * function only adjusts the limit. The change must be applied
2886 * using sata_set_spd().
2887 *
2888 * If @spd_limit is non-zero, the speed is limited to a speed equal
2889 * to or lower than @spd_limit if such a speed is supported. If
2890 * @spd_limit is slower than any supported speed, only the lowest
2891 * supported speed is allowed.
2892 *
2893 * LOCKING:
2894 * Inherited from caller.
2895 *
2896 * RETURNS:
2897 * 0 on success, negative errno on failure
2898 */
2899 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2900 {
2901 u32 sstatus, spd, mask;
2902 int rc, bit;
2903
2904 if (!sata_scr_valid(link))
2905 return -EOPNOTSUPP;
2906
2907 /* If SCR can be read, use it to determine the current SPD.
2908 * If not, use cached value in link->sata_spd.
2909 */
2910 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2911 if (rc == 0 && ata_sstatus_online(sstatus))
2912 spd = (sstatus >> 4) & 0xf;
2913 else
2914 spd = link->sata_spd;
2915
2916 mask = link->sata_spd_limit;
2917 if (mask <= 1)
2918 return -EINVAL;
2919
2920 /* unconditionally mask off the highest bit */
2921 bit = fls(mask) - 1;
2922 mask &= ~(1 << bit);
2923
2924 /* Mask off all speeds higher than or equal to the current
2925 * one. Force 1.5Gbps if current SPD is not available.
2926 */
2927 if (spd > 1)
2928 mask &= (1 << (spd - 1)) - 1;
2929 else
2930 mask &= 1;
2931
2932 /* were we already at the bottom? */
2933 if (!mask)
2934 return -EINVAL;
2935
2936 if (spd_limit) {
2937 if (mask & ((1 << spd_limit) - 1))
2938 mask &= (1 << spd_limit) - 1;
2939 else {
2940 bit = ffs(mask) - 1;
2941 mask = 1 << bit;
2942 }
2943 }
2944
2945 link->sata_spd_limit = mask;
2946
2947 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2948 sata_spd_string(fls(mask)));
2949
2950 return 0;
2951 }
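
/*
 * Worked example (illustrative): sata_spd_limit is a bitmask with bit 0 =
 * 1.5 Gbps, bit 1 = 3.0 Gbps, bit 2 = 6.0 Gbps.  With limit 0x7 and the
 * link currently at 6.0 Gbps (spd == 3), the highest bit is dropped
 * (0x7 -> 0x3) and speeds at or above the current one are masked off,
 * leaving 0x3, i.e. the link is newly capped at 3.0 Gbps (fls(0x3) == 2).
 */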
2952
2953 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2954 {
2955 struct ata_link *host_link = &link->ap->link;
2956 u32 limit, target, spd;
2957
2958 limit = link->sata_spd_limit;
2959
2960 /* Don't configure downstream link faster than upstream link.
2961 * It doesn't speed up anything and some PMPs choke on such
2962 * configuration.
2963 */
2964 if (!ata_is_host_link(link) && host_link->sata_spd)
2965 limit &= (1 << host_link->sata_spd) - 1;
2966
2967 if (limit == UINT_MAX)
2968 target = 0;
2969 else
2970 target = fls(limit);
2971
2972 spd = (*scontrol >> 4) & 0xf;
2973 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2974
2975 return spd != target;
2976 }
2977
2978 /**
2979 * sata_set_spd_needed - is SATA spd configuration needed
2980 * @link: Link in question
2981 *
2982 * Test whether the spd limit in SControl matches
2983 * @link->sata_spd_limit. This function is used to determine
2984 * whether hardreset is necessary to apply SATA spd
2985 * configuration.
2986 *
2987 * LOCKING:
2988 * Inherited from caller.
2989 *
2990 * RETURNS:
2991 * 1 if SATA spd configuration is needed, 0 otherwise.
2992 */
2993 static int sata_set_spd_needed(struct ata_link *link)
2994 {
2995 u32 scontrol;
2996
2997 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2998 return 1;
2999
3000 return __sata_set_spd_needed(link, &scontrol);
3001 }
3002
3003 /**
3004 * sata_set_spd - set SATA spd according to spd limit
3005 * @link: Link to set SATA spd for
3006 *
3007 * Set SATA spd of @link according to sata_spd_limit.
3008 *
3009 * LOCKING:
3010 * Inherited from caller.
3011 *
3012 * RETURNS:
3013 * 0 if spd doesn't need to be changed, 1 if spd has been
3014 * changed. Negative errno if SCR registers are inaccessible.
3015 */
3016 int sata_set_spd(struct ata_link *link)
3017 {
3018 u32 scontrol;
3019 int rc;
3020
3021 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3022 return rc;
3023
3024 if (!__sata_set_spd_needed(link, &scontrol))
3025 return 0;
3026
3027 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3028 return rc;
3029
3030 return 1;
3031 }
3032
3033 /*
3034 * This mode timing computation functionality is ported over from
3035 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3036 */
3037 /*
3038 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3039 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3040 * for UDMA6, which is currently supported only by Maxtor drives.
3041 *
3042 * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
3043 */
3044
3045 static const struct ata_timing ata_timing[] = {
3046 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3047 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3048 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3049 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3050 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3051 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3052 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3053 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3054
3055 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3056 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3057 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3058
3059 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3060 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3061 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3062 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3063 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3064
3065 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3066 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3067 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3068 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3069 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3070 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3071 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3072 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3073
3074 { 0xFF }
3075 };
3076
3077 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3078 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
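
/*
 * ENOUGH() is a round-up integer division: how many whole clock periods of
 * length @unit are needed to cover @v.  EZ() additionally maps an
 * unspecified value of 0 to 0 clocks.  For example, with timings converted
 * to picoseconds and a ~33 MHz bus clock (roughly 30000 ps per clock),
 * ENOUGH(70000, 30000) == 3, i.e. a 70 ns active time needs 3 clocks.
 */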
3079
3080 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3081 {
3082 q->setup = EZ(t->setup * 1000, T);
3083 q->act8b = EZ(t->act8b * 1000, T);
3084 q->rec8b = EZ(t->rec8b * 1000, T);
3085 q->cyc8b = EZ(t->cyc8b * 1000, T);
3086 q->active = EZ(t->active * 1000, T);
3087 q->recover = EZ(t->recover * 1000, T);
3088 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
3089 q->cycle = EZ(t->cycle * 1000, T);
3090 q->udma = EZ(t->udma * 1000, UT);
3091 }
3092
3093 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3094 struct ata_timing *m, unsigned int what)
3095 {
3096 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3097 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3098 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3099 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3100 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3101 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3102 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3103 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3104 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3105 }
3106
3107 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3108 {
3109 const struct ata_timing *t = ata_timing;
3110
3111 while (xfer_mode > t->mode)
3112 t++;
3113
3114 if (xfer_mode == t->mode)
3115 return t;
3116 return NULL;
3117 }
3118
3119 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3120 struct ata_timing *t, int T, int UT)
3121 {
3122 const u16 *id = adev->id;
3123 const struct ata_timing *s;
3124 struct ata_timing p;
3125
3126 /*
3127 * Find the mode.
3128 */
3129
3130 if (!(s = ata_timing_find_mode(speed)))
3131 return -EINVAL;
3132
3133 memcpy(t, s, sizeof(*s));
3134
3135 /*
3136 * If the drive is an EIDE drive, it can tell us it needs extended
3137 * PIO/MW_DMA cycle timing.
3138 */
3139
3140 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3141 memset(&p, 0, sizeof(p));
3142
3143 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3144 if (speed <= XFER_PIO_2)
3145 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3146 else if ((speed <= XFER_PIO_4) ||
3147 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3148 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3149 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3150 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3151
3152 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3153 }
3154
3155 /*
3156 * Convert the timing to bus clock counts.
3157 */
3158
3159 ata_timing_quantize(t, t, T, UT);
3160
3161 /*
3162 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3163 * S.M.A.R.T. and some other commands. We have to ensure that the
3164 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3165 */
3166
3167 if (speed > XFER_PIO_6) {
3168 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3169 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3170 }
3171
3172 /*
3173 * Lengthen active & recovery time so that cycle time is correct.
3174 */
3175
3176 if (t->act8b + t->rec8b < t->cyc8b) {
3177 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3178 t->rec8b = t->cyc8b - t->act8b;
3179 }
3180
3181 if (t->active + t->recover < t->cycle) {
3182 t->active += (t->cycle - (t->active + t->recover)) / 2;
3183 t->recover = t->cycle - t->active;
3184 }
3185
3186 /* In a few cases quantisation may produce enough errors to
3187 leave t->cycle too low for the sum of active and recovery;
3188 if so we must correct this. */
3189 if (t->active + t->recover > t->cycle)
3190 t->cycle = t->active + t->recover;
3191
3192 return 0;
3193 }
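
/*
 * Example (illustrative sketch): a typical PATA LLD converts the chosen
 * mode into bus clocks in its ->set_piomode()/->set_dmamode() hooks and
 * programs chipset registers from the result.  The ~33 MHz clock value
 * and example_program_timings() are hypothetical; UT is only consulted
 * for UDMA modes.
 *
 *	struct ata_timing t;
 *	const int T = 1000000000 / 33333;	// ~30000 ps per clock
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) == 0)
 *		example_program_timings(ap, adev, t.setup, t.active,
 *					t.recover);
 */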
3194
3195 /**
3196 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3197 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3198 * @cycle: cycle duration in ns
3199 *
3200 * Return matching xfer mode for @cycle. The returned mode is of
3201 * the transfer type specified by @xfer_shift. If @cycle is too
3202 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3203 * than the fastest known mode, the fastest mode is returned.
3204 *
3205 * LOCKING:
3206 * None.
3207 *
3208 * RETURNS:
3209 * Matching xfer_mode, 0xff if no match found.
3210 */
3211 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3212 {
3213 u8 base_mode = 0xff, last_mode = 0xff;
3214 const struct ata_xfer_ent *ent;
3215 const struct ata_timing *t;
3216
3217 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3218 if (ent->shift == xfer_shift)
3219 base_mode = ent->base;
3220
3221 for (t = ata_timing_find_mode(base_mode);
3222 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3223 unsigned short this_cycle;
3224
3225 switch (xfer_shift) {
3226 case ATA_SHIFT_PIO:
3227 case ATA_SHIFT_MWDMA:
3228 this_cycle = t->cycle;
3229 break;
3230 case ATA_SHIFT_UDMA:
3231 this_cycle = t->udma;
3232 break;
3233 default:
3234 return 0xff;
3235 }
3236
3237 if (cycle > this_cycle)
3238 break;
3239
3240 last_mode = t->mode;
3241 }
3242
3243 return last_mode;
3244 }
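
/*
 * Worked example (illustrative): asking for a UDMA mode with a cycle of at
 * least 50 ns walks the UDMA entries above and stops before UDMA3 (45 ns),
 * so the call returns the fastest mode that is still slow enough:
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 50);  // XFER_UDMA_2
 */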
3245
3246 /**
3247 * ata_down_xfermask_limit - adjust dev xfer masks downward
3248 * @dev: Device to adjust xfer masks
3249 * @sel: ATA_DNXFER_* selector
3250 *
3251 * Adjust xfer masks of @dev downward. Note that this function
3252 * does not apply the change. Invoking ata_set_mode() afterwards
3253 * will apply the limit.
3254 *
3255 * LOCKING:
3256 * Inherited from caller.
3257 *
3258 * RETURNS:
3259 * 0 on success, negative errno on failure
3260 */
3261 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3262 {
3263 char buf[32];
3264 unsigned long orig_mask, xfer_mask;
3265 unsigned long pio_mask, mwdma_mask, udma_mask;
3266 int quiet, highbit;
3267
3268 quiet = !!(sel & ATA_DNXFER_QUIET);
3269 sel &= ~ATA_DNXFER_QUIET;
3270
3271 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3272 dev->mwdma_mask,
3273 dev->udma_mask);
3274 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3275
3276 switch (sel) {
3277 case ATA_DNXFER_PIO:
3278 highbit = fls(pio_mask) - 1;
3279 pio_mask &= ~(1 << highbit);
3280 break;
3281
3282 case ATA_DNXFER_DMA:
3283 if (udma_mask) {
3284 highbit = fls(udma_mask) - 1;
3285 udma_mask &= ~(1 << highbit);
3286 if (!udma_mask)
3287 return -ENOENT;
3288 } else if (mwdma_mask) {
3289 highbit = fls(mwdma_mask) - 1;
3290 mwdma_mask &= ~(1 << highbit);
3291 if (!mwdma_mask)
3292 return -ENOENT;
3293 }
3294 break;
3295
3296 case ATA_DNXFER_40C:
3297 udma_mask &= ATA_UDMA_MASK_40C;
3298 break;
3299
3300 case ATA_DNXFER_FORCE_PIO0:
3301 pio_mask &= 1;
3302 case ATA_DNXFER_FORCE_PIO:
3303 mwdma_mask = 0;
3304 udma_mask = 0;
3305 break;
3306
3307 default:
3308 BUG();
3309 }
3310
3311 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3312
3313 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3314 return -ENOENT;
3315
3316 if (!quiet) {
3317 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3318 snprintf(buf, sizeof(buf), "%s:%s",
3319 ata_mode_string(xfer_mask),
3320 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3321 else
3322 snprintf(buf, sizeof(buf), "%s",
3323 ata_mode_string(xfer_mask));
3324
3325 ata_dev_printk(dev, KERN_WARNING,
3326 "limiting speed to %s\n", buf);
3327 }
3328
3329 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3330 &dev->udma_mask);
3331
3332 return 0;
3333 }
3334
3335 static int ata_dev_set_mode(struct ata_device *dev)
3336 {
3337 struct ata_port *ap = dev->link->ap;
3338 struct ata_eh_context *ehc = &dev->link->eh_context;
3339 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3340 const char *dev_err_whine = "";
3341 int ign_dev_err = 0;
3342 unsigned int err_mask = 0;
3343 int rc;
3344
3345 dev->flags &= ~ATA_DFLAG_PIO;
3346 if (dev->xfer_shift == ATA_SHIFT_PIO)
3347 dev->flags |= ATA_DFLAG_PIO;
3348
3349 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3350 dev_err_whine = " (SET_XFERMODE skipped)";
3351 else {
3352 if (nosetxfer)
3353 ata_dev_printk(dev, KERN_WARNING,
3354 "NOSETXFER but PATA detected - can't "
3355 "skip SETXFER, might malfunction\n");
3356 err_mask = ata_dev_set_xfermode(dev);
3357 }
3358
3359 if (err_mask & ~AC_ERR_DEV)
3360 goto fail;
3361
3362 /* revalidate */
3363 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3364 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3365 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3366 if (rc)
3367 return rc;
3368
3369 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3370 /* Old CFA may refuse this command, which is just fine */
3371 if (ata_id_is_cfa(dev->id))
3372 ign_dev_err = 1;
3373 /* Catch several broken garbage emulations plus some pre
3374 ATA devices */
3375 if (ata_id_major_version(dev->id) == 0 &&
3376 dev->pio_mode <= XFER_PIO_2)
3377 ign_dev_err = 1;
3378 /* Some very old devices and some bad newer ones fail
3379 any kind of SET_XFERMODE request but support PIO0-2
3380 timings and no IORDY */
3381 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3382 ign_dev_err = 1;
3383 }
3384 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3385 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3386 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3387 dev->dma_mode == XFER_MW_DMA_0 &&
3388 (dev->id[63] >> 8) & 1)
3389 ign_dev_err = 1;
3390
3391 /* if the device is actually configured correctly, ignore dev err */
3392 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3393 ign_dev_err = 1;
3394
3395 if (err_mask & AC_ERR_DEV) {
3396 if (!ign_dev_err)
3397 goto fail;
3398 else
3399 dev_err_whine = " (device error ignored)";
3400 }
3401
3402 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3403 dev->xfer_shift, (int)dev->xfer_mode);
3404
3405 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3406 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3407 dev_err_whine);
3408
3409 return 0;
3410
3411 fail:
3412 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3413 "(err_mask=0x%x)\n", err_mask);
3414 return -EIO;
3415 }
3416
3417 /**
3418 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3419 * @link: link on which timings will be programmed
3420 * @r_failed_dev: out parameter for failed device
3421 *
3422 * Standard implementation of the function used to tune and set
3423 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3424 * ata_dev_set_mode() fails, pointer to the failing device is
3425 * returned in @r_failed_dev.
3426 *
3427 * LOCKING:
3428 * PCI/etc. bus probe sem.
3429 *
3430 * RETURNS:
3431 * 0 on success, negative errno otherwise
3432 */
3433
3434 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3435 {
3436 struct ata_port *ap = link->ap;
3437 struct ata_device *dev;
3438 int rc = 0, used_dma = 0, found = 0;
3439
3440 /* step 1: calculate xfer_mask */
3441 ata_for_each_dev(dev, link, ENABLED) {
3442 unsigned long pio_mask, dma_mask;
3443 unsigned int mode_mask;
3444
3445 mode_mask = ATA_DMA_MASK_ATA;
3446 if (dev->class == ATA_DEV_ATAPI)
3447 mode_mask = ATA_DMA_MASK_ATAPI;
3448 else if (ata_id_is_cfa(dev->id))
3449 mode_mask = ATA_DMA_MASK_CFA;
3450
3451 ata_dev_xfermask(dev);
3452 ata_force_xfermask(dev);
3453
3454 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3455 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3456
3457 if (libata_dma_mask & mode_mask)
3458 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3459 else
3460 dma_mask = 0;
3461
3462 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3463 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3464
3465 found = 1;
3466 if (ata_dma_enabled(dev))
3467 used_dma = 1;
3468 }
3469 if (!found)
3470 goto out;
3471
3472 /* step 2: always set host PIO timings */
3473 ata_for_each_dev(dev, link, ENABLED) {
3474 if (dev->pio_mode == 0xff) {
3475 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3476 rc = -EINVAL;
3477 goto out;
3478 }
3479
3480 dev->xfer_mode = dev->pio_mode;
3481 dev->xfer_shift = ATA_SHIFT_PIO;
3482 if (ap->ops->set_piomode)
3483 ap->ops->set_piomode(ap, dev);
3484 }
3485
3486 /* step 3: set host DMA timings */
3487 ata_for_each_dev(dev, link, ENABLED) {
3488 if (!ata_dma_enabled(dev))
3489 continue;
3490
3491 dev->xfer_mode = dev->dma_mode;
3492 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3493 if (ap->ops->set_dmamode)
3494 ap->ops->set_dmamode(ap, dev);
3495 }
3496
3497 /* step 4: update devices' xfer mode */
3498 ata_for_each_dev(dev, link, ENABLED) {
3499 rc = ata_dev_set_mode(dev);
3500 if (rc)
3501 goto out;
3502 }
3503
3504 /* Record simplex status. If we selected DMA then the other
3505 * host channels are not permitted to do so.
3506 */
3507 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3508 ap->host->simplex_claimed = ap;
3509
3510 out:
3511 if (rc)
3512 *r_failed_dev = dev;
3513 return rc;
3514 }
3515
3516 /**
3517 * ata_wait_ready - wait for link to become ready
3518 * @link: link to be waited on
3519 * @deadline: deadline jiffies for the operation
3520 * @check_ready: callback to check link readiness
3521 *
3522 * Wait for @link to become ready. @check_ready should return
3523 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3524 * link doesn't seem to be occupied, other errno for other error
3525 * conditions.
3526 *
3527 * Transient -ENODEV conditions are allowed for
3528 * ATA_TMOUT_FF_WAIT.
3529 *
3530 * LOCKING:
3531 * EH context.
3532 *
3533 * RETURNS:
3534 * 0 if @link is ready before @deadline; otherwise, -errno.
3535 */
3536 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3537 int (*check_ready)(struct ata_link *link))
3538 {
3539 unsigned long start = jiffies;
3540 unsigned long nodev_deadline;
3541 int warned = 0;
3542
3543 /* choose which 0xff timeout to use, read comment in libata.h */
3544 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3545 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3546 else
3547 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3548
3549 /* Slave readiness can't be tested separately from master. On
3550 * M/S emulation configuration, this function should be called
3551 * only on the master and it will handle both master and slave.
3552 */
3553 WARN_ON(link == link->ap->slave_link);
3554
3555 if (time_after(nodev_deadline, deadline))
3556 nodev_deadline = deadline;
3557
3558 while (1) {
3559 unsigned long now = jiffies;
3560 int ready, tmp;
3561
3562 ready = tmp = check_ready(link);
3563 if (ready > 0)
3564 return 0;
3565
3566 /*
3567 * -ENODEV could be transient. Ignore -ENODEV if link
3568 * is online. Also, some SATA devices take a long
3569 * time to clear 0xff after reset. Wait for
3570 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3571 * offline.
3572 *
3573 * Note that some PATA controllers (pata_ali) explode
3574 * if status register is read more than once when
3575 * there's no device attached.
3576 */
3577 if (ready == -ENODEV) {
3578 if (ata_link_online(link))
3579 ready = 0;
3580 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3581 !ata_link_offline(link) &&
3582 time_before(now, nodev_deadline))
3583 ready = 0;
3584 }
3585
3586 if (ready)
3587 return ready;
3588 if (time_after(now, deadline))
3589 return -EBUSY;
3590
3591 if (!warned && time_after(now, start + 5 * HZ) &&
3592 (deadline - now > 3 * HZ)) {
3593 ata_link_printk(link, KERN_WARNING,
3594 "link is slow to respond, please be patient "
3595 "(ready=%d)\n", tmp);
3596 warned = 1;
3597 }
3598
3599 msleep(50);
3600 }
3601 }
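
/*
 * Example (illustrative, hypothetical status read): a driver-supplied
 * @check_ready callback usually just samples its controller's taskfile
 * status and feeds it to libata's ata_check_ready() helper, which maps it
 * to the ready/not-ready/-ENODEV convention described above.
 * example_read_status() is a placeholder, not a real libata symbol.
 *
 *	static int example_check_ready(struct ata_link *link)
 *	{
 *		u8 status = example_read_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 */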
3602
3603 /**
3604 * ata_wait_after_reset - wait for link to become ready after reset
3605 * @link: link to be waited on
3606 * @deadline: deadline jiffies for the operation
3607 * @check_ready: callback to check link readiness
3608 *
3609 * Wait for @link to become ready after reset.
3610 *
3611 * LOCKING:
3612 * EH context.
3613 *
3614 * RETURNS:
3615 * 0 if @link is ready before @deadline; otherwise, -errno.
3616 */
3617 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3618 int (*check_ready)(struct ata_link *link))
3619 {
3620 msleep(ATA_WAIT_AFTER_RESET);
3621
3622 return ata_wait_ready(link, deadline, check_ready);
3623 }
3624
3625 /**
3626 * sata_link_debounce - debounce SATA phy status
3627 * @link: ATA link to debounce SATA phy status for
3628 * @params: timing parameters { interval, duration, timeout } in msec
3629 * @deadline: deadline jiffies for the operation
3630 *
3631 * Make sure SStatus of @link reaches stable state, determined by
3632 * holding the same value where DET is not 1 for @duration polled
3633 * every @interval, before @timeout. Timeout constrains the
3634 * beginning of the stable state. Because DET gets stuck at 1 on
3635 * some controllers after hot unplugging, this function waits
3636 * until timeout then returns 0 if DET is stable at 1.
3637 *
3638 * @timeout is further limited by @deadline. The sooner of the
3639 * two is used.
3640 *
3641 * LOCKING:
3642 * Kernel thread context (may sleep)
3643 *
3644 * RETURNS:
3645 * 0 on success, -errno on failure.
3646 */
3647 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3648 unsigned long deadline)
3649 {
3650 unsigned long interval = params[0];
3651 unsigned long duration = params[1];
3652 unsigned long last_jiffies, t;
3653 u32 last, cur;
3654 int rc;
3655
3656 t = ata_deadline(jiffies, params[2]);
3657 if (time_before(t, deadline))
3658 deadline = t;
3659
3660 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3661 return rc;
3662 cur &= 0xf;
3663
3664 last = cur;
3665 last_jiffies = jiffies;
3666
3667 while (1) {
3668 msleep(interval);
3669 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3670 return rc;
3671 cur &= 0xf;
3672
3673 /* DET stable? */
3674 if (cur == last) {
3675 if (cur == 1 && time_before(jiffies, deadline))
3676 continue;
3677 if (time_after(jiffies,
3678 ata_deadline(last_jiffies, duration)))
3679 return 0;
3680 continue;
3681 }
3682
3683 /* unstable, start over */
3684 last = cur;
3685 last_jiffies = jiffies;
3686
3687 /* Check deadline. If debouncing failed, return
3688 * -EPIPE to tell upper layer to lower link speed.
3689 */
3690 if (time_after(jiffies, deadline))
3691 return -EPIPE;
3692 }
3693 }
3694
3695 /**
3696 * sata_link_resume - resume SATA link
3697 * @link: ATA link to resume SATA
3698 * @params: timing parameters { interval, duration, timeout } in msec
3699 * @deadline: deadline jiffies for the operation
3700 *
3701 * Resume SATA phy @link and debounce it.
3702 *
3703 * LOCKING:
3704 * Kernel thread context (may sleep)
3705 *
3706 * RETURNS:
3707 * 0 on success, -errno on failure.
3708 */
3709 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3710 unsigned long deadline)
3711 {
3712 int tries = ATA_LINK_RESUME_TRIES;
3713 u32 scontrol, serror;
3714 int rc;
3715
3716 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3717 return rc;
3718
3719 /*
3720 * Writes to SControl sometimes get ignored under certain
3721 * controllers (ata_piix SIDPR). Make sure DET actually is
3722 * cleared.
3723 */
3724 do {
3725 scontrol = (scontrol & 0x0f0) | 0x300;
3726 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3727 return rc;
3728 /*
3729 * Some PHYs react badly if SStatus is pounded
3730 * immediately after resuming. Delay 200ms before
3731 * debouncing.
3732 */
3733 msleep(200);
3734
3735 /* is SControl restored correctly? */
3736 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3737 return rc;
3738 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3739
3740 if ((scontrol & 0xf0f) != 0x300) {
3741 ata_link_printk(link, KERN_ERR,
3742 "failed to resume link (SControl %X)\n",
3743 scontrol);
3744 return 0;
3745 }
3746
3747 if (tries < ATA_LINK_RESUME_TRIES)
3748 ata_link_printk(link, KERN_WARNING,
3749 "link resume succeeded after %d retries\n",
3750 ATA_LINK_RESUME_TRIES - tries);
3751
3752 if ((rc = sata_link_debounce(link, params, deadline)))
3753 return rc;
3754
3755 /* clear SError, some PHYs require this even for SRST to work */
3756 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3757 rc = sata_scr_write(link, SCR_ERROR, serror);
3758
3759 return rc != -EINVAL ? rc : 0;
3760 }
3761
3762 /**
3763 * ata_std_prereset - prepare for reset
3764 * @link: ATA link to be reset
3765 * @deadline: deadline jiffies for the operation
3766 *
3767 * @link is about to be reset. Initialize it. Failure from
3768 * prereset makes libata abort whole reset sequence and give up
3769 * that port, so prereset should be best-effort. It does its
3770 * best to prepare for reset sequence but if things go wrong, it
3771 * should just whine, not fail.
3772 *
3773 * LOCKING:
3774 * Kernel thread context (may sleep)
3775 *
3776 * RETURNS:
3777 * 0 on success, -errno otherwise.
3778 */
3779 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3780 {
3781 struct ata_port *ap = link->ap;
3782 struct ata_eh_context *ehc = &link->eh_context;
3783 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3784 int rc;
3785
3786 /* if we're about to do hardreset, nothing more to do */
3787 if (ehc->i.action & ATA_EH_HARDRESET)
3788 return 0;
3789
3790 /* if SATA, resume link */
3791 if (ap->flags & ATA_FLAG_SATA) {
3792 rc = sata_link_resume(link, timing, deadline);
3793 /* whine about phy resume failure but proceed */
3794 if (rc && rc != -EOPNOTSUPP)
3795 ata_link_printk(link, KERN_WARNING, "failed to resume "
3796 "link for reset (errno=%d)\n", rc);
3797 }
3798
3799 /* no point in trying softreset on offline link */
3800 if (ata_phys_link_offline(link))
3801 ehc->i.action &= ~ATA_EH_SOFTRESET;
3802
3803 return 0;
3804 }
3805
3806 /**
3807 * sata_link_hardreset - reset link via SATA phy reset
3808 * @link: link to reset
3809 * @timing: timing parameters { interval, duration, timeout } in msec
3810 * @deadline: deadline jiffies for the operation
3811 * @online: optional out parameter indicating link onlineness
3812 * @check_ready: optional callback to check link readiness
3813 *
3814 * SATA phy-reset @link using DET bits of SControl register.
3815 * After hardreset, link readiness is waited upon using
3816 * ata_wait_ready() if @check_ready is specified. LLDs are
3817 * allowed to not specify @check_ready and wait on their own after this
3818 * function returns. Device classification is LLD's
3819 * responsibility.
3820 *
3821 * *@online is set to one iff reset succeeded and @link is online
3822 * after reset.
3823 *
3824 * LOCKING:
3825 * Kernel thread context (may sleep)
3826 *
3827 * RETURNS:
3828 * 0 on success, -errno otherwise.
3829 */
3830 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3831 unsigned long deadline,
3832 bool *online, int (*check_ready)(struct ata_link *))
3833 {
3834 u32 scontrol;
3835 int rc;
3836
3837 DPRINTK("ENTER\n");
3838
3839 if (online)
3840 *online = false;
3841
3842 if (sata_set_spd_needed(link)) {
3843 /* SATA spec says nothing about how to reconfigure
3844 * spd. To be on the safe side, turn off phy during
3845 * reconfiguration. This works for at least ICH7 AHCI
3846 * and Sil3124.
3847 */
3848 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3849 goto out;
3850
3851 scontrol = (scontrol & 0x0f0) | 0x304;
3852
3853 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3854 goto out;
3855
3856 sata_set_spd(link);
3857 }
3858
3859 /* issue phy wake/reset */
3860 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3861 goto out;
3862
3863 scontrol = (scontrol & 0x0f0) | 0x301;
3864
3865 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3866 goto out;
3867
3868 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3869 * 10.4.2 says at least 1 ms.
3870 */
3871 msleep(1);
3872
3873 /* bring link back */
3874 rc = sata_link_resume(link, timing, deadline);
3875 if (rc)
3876 goto out;
3877 /* if link is offline nothing more to do */
3878 if (ata_phys_link_offline(link))
3879 goto out;
3880
3881 /* Link is online. From this point, -ENODEV too is an error. */
3882 if (online)
3883 *online = true;
3884
3885 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3886 /* If PMP is supported, we have to do follow-up SRST.
3887 * Some PMPs don't send D2H Reg FIS after hardreset if
3888 * the first port is empty. Wait only for
3889 * ATA_TMOUT_PMP_SRST_WAIT.
3890 */
3891 if (check_ready) {
3892 unsigned long pmp_deadline;
3893
3894 pmp_deadline = ata_deadline(jiffies,
3895 ATA_TMOUT_PMP_SRST_WAIT);
3896 if (time_after(pmp_deadline, deadline))
3897 pmp_deadline = deadline;
3898 ata_wait_ready(link, pmp_deadline, check_ready);
3899 }
3900 rc = -EAGAIN;
3901 goto out;
3902 }
3903
3904 rc = 0;
3905 if (check_ready)
3906 rc = ata_wait_ready(link, deadline, check_ready);
3907 out:
3908 if (rc && rc != -EAGAIN) {
3909 /* online is set iff link is online && reset succeeded */
3910 if (online)
3911 *online = false;
3912 ata_link_printk(link, KERN_ERR,
3913 "COMRESET failed (errno=%d)\n", rc);
3914 }
3915 DPRINTK("EXIT, rc=%d\n", rc);
3916 return rc;
3917 }
3918
3919 /**
3920 * sata_std_hardreset - COMRESET w/o waiting or classification
3921 * @link: link to reset
3922 * @class: resulting class of attached device
3923 * @deadline: deadline jiffies for the operation
3924 *
3925 * Standard SATA COMRESET w/o waiting or classification.
3926 *
3927 * LOCKING:
3928 * Kernel thread context (may sleep)
3929 *
3930 * RETURNS:
3931 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3932 */
3933 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3934 unsigned long deadline)
3935 {
3936 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3937 bool online;
3938 int rc;
3939
3940 /* do hardreset */
3941 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3942 return online ? -EAGAIN : rc;
3943 }
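/*
 * Illustrative sketch (not part of the original file): a typical LLD
 * hardreset built on sata_link_hardreset() above.  The helpers
 * my_check_ready() and my_classify() are hypothetical stand-ins for a
 * driver's own port-ready test and device classification.
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *				sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 my_check_ready);
 *		if (online)
 *			*class = my_classify(link);
 *		return rc;
 *	}
 */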
3944
3945 /**
3946 * ata_std_postreset - standard postreset callback
3947 * @link: the target ata_link
3948 * @classes: classes of attached devices
3949 *
3950 * This function is invoked after a successful reset. Note that
3951 * the device might have been reset more than once using
3952 * different reset methods before postreset is invoked.
3953 *
3954 * LOCKING:
3955 * Kernel thread context (may sleep)
3956 */
3957 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3958 {
3959 u32 serror;
3960
3961 DPRINTK("ENTER\n");
3962
3963 /* reset complete, clear SError */
3964 if (!sata_scr_read(link, SCR_ERROR, &serror))
3965 sata_scr_write(link, SCR_ERROR, serror);
3966
3967 /* print link status */
3968 sata_print_link_status(link);
3969
3970 DPRINTK("EXIT\n");
3971 }
3972
3973 /**
3974 * ata_dev_same_device - Determine whether new ID matches configured device
3975 * @dev: device to compare against
3976 * @new_class: class of the new device
3977 * @new_id: IDENTIFY page of the new device
3978 *
3979 * Compare @new_class and @new_id against @dev and determine
3980 * whether @dev is the device indicated by @new_class and
3981 * @new_id.
3982 *
3983 * LOCKING:
3984 * None.
3985 *
3986 * RETURNS:
3987 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3988 */
3989 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3990 const u16 *new_id)
3991 {
3992 const u16 *old_id = dev->id;
3993 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3994 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3995
3996 if (dev->class != new_class) {
3997 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3998 dev->class, new_class);
3999 return 0;
4000 }
4001
4002 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4003 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4004 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4005 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4006
4007 if (strcmp(model[0], model[1])) {
4008 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4009 "'%s' != '%s'\n", model[0], model[1]);
4010 return 0;
4011 }
4012
4013 if (strcmp(serial[0], serial[1])) {
4014 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4015 "'%s' != '%s'\n", serial[0], serial[1]);
4016 return 0;
4017 }
4018
4019 return 1;
4020 }
4021
4022 /**
4023 * ata_dev_reread_id - Re-read IDENTIFY data
4024 * @dev: target ATA device
4025 * @readid_flags: read ID flags
4026 *
4027 * Re-read IDENTIFY page and make sure @dev is still attached to
4028 * the port.
4029 *
4030 * LOCKING:
4031 * Kernel thread context (may sleep)
4032 *
4033 * RETURNS:
4034 * 0 on success, negative errno otherwise
4035 */
4036 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4037 {
4038 unsigned int class = dev->class;
4039 u16 *id = (void *)dev->link->ap->sector_buf;
4040 int rc;
4041
4042 /* read ID data */
4043 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4044 if (rc)
4045 return rc;
4046
4047 /* is the device still there? */
4048 if (!ata_dev_same_device(dev, class, id))
4049 return -ENODEV;
4050
4051 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4052 return 0;
4053 }
4054
4055 /**
4056 * ata_dev_revalidate - Revalidate ATA device
4057 * @dev: device to revalidate
4058 * @new_class: new class code
4059 * @readid_flags: read ID flags
4060 *
4061 * Re-read IDENTIFY page, make sure @dev is still attached to the
4062 * port and reconfigure it according to the new IDENTIFY page.
4063 *
4064 * LOCKING:
4065 * Kernel thread context (may sleep)
4066 *
4067 * RETURNS:
4068 * 0 on success, negative errno otherwise
4069 */
4070 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4071 unsigned int readid_flags)
4072 {
4073 u64 n_sectors = dev->n_sectors;
4074 u64 n_native_sectors = dev->n_native_sectors;
4075 int rc;
4076
4077 if (!ata_dev_enabled(dev))
4078 return -ENODEV;
4079
4080 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4081 if (ata_class_enabled(new_class) &&
4082 new_class != ATA_DEV_ATA &&
4083 new_class != ATA_DEV_ATAPI &&
4084 new_class != ATA_DEV_SEMB) {
4085 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4086 dev->class, new_class);
4087 rc = -ENODEV;
4088 goto fail;
4089 }
4090
4091 /* re-read ID */
4092 rc = ata_dev_reread_id(dev, readid_flags);
4093 if (rc)
4094 goto fail;
4095
4096 /* configure device according to the new ID */
4097 rc = ata_dev_configure(dev);
4098 if (rc)
4099 goto fail;
4100
4101 /* verify n_sectors hasn't changed */
4102 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4103 dev->n_sectors == n_sectors)
4104 return 0;
4105
4106 /* n_sectors has changed */
4107 ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
4108 (unsigned long long)n_sectors,
4109 (unsigned long long)dev->n_sectors);
4110
4111 /*
4112 * Something could have caused HPA to be unlocked
4113 * involuntarily. If n_native_sectors hasn't changed and the
4114 * new size matches it, keep the device.
4115 */
4116 if (dev->n_native_sectors == n_native_sectors &&
4117 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4118 ata_dev_printk(dev, KERN_WARNING,
4119 "new n_sectors matches native, probably "
4120 "late HPA unlock, n_sectors updated\n");
4121 /* use the larger n_sectors */
4122 return 0;
4123 }
4124
4125 /*
4126 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4127 * unlocking HPA in those cases.
4128 *
4129 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4130 */
4131 if (dev->n_native_sectors == n_native_sectors &&
4132 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4133 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4134 ata_dev_printk(dev, KERN_WARNING,
4135 "old n_sectors matches native, probably "
4136 "late HPA lock, will try to unlock HPA\n");
4137 /* try unlocking HPA */
4138 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4139 rc = -EIO;
4140 } else
4141 rc = -ENODEV;
4142
4143 /* restore original n_[native_]sectors and fail */
4144 dev->n_native_sectors = n_native_sectors;
4145 dev->n_sectors = n_sectors;
4146 fail:
4147 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4148 return rc;
4149 }
4150
4151 struct ata_blacklist_entry {
4152 const char *model_num;
4153 const char *model_rev;
4154 unsigned long horkage;
4155 };
4156
4157 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4158 /* Devices with DMA related problems under Linux */
4159 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4160 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4161 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4162 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4163 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4164 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4165 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4166 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4167 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4168 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4169 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4170 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4171 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4172 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4173 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4174 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4175 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4176 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4177 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4178 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4179 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4180 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4181 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4182 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4183 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4184 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4185 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4186 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4187 /* Odd clown on sil3726/4726 PMPs */
4188 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4189
4190 /* Weird ATAPI devices */
4191 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4192 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4193
4194 /* Devices we expect to fail diagnostics */
4195
4196 /* Devices where NCQ should be avoided */
4197 /* NCQ is slow */
4198 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4199 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4200 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4201 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4202 /* NCQ is broken */
4203 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4204 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4205 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4206 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4207 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4208
4209 /* Seagate NCQ + FLUSH CACHE firmware bug */
4210 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4211 ATA_HORKAGE_FIRMWARE_WARN },
4212
4213 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4214 ATA_HORKAGE_FIRMWARE_WARN },
4215
4216 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4217 ATA_HORKAGE_FIRMWARE_WARN },
4218
4219 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4220 ATA_HORKAGE_FIRMWARE_WARN },
4221
4222 /* Blacklist entries taken from Silicon Image 3124/3132
4223 Windows driver .inf file - also several Linux problem reports */
4224 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4225 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4226 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4227
4228 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4229 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4230
4231 /* devices which puke on READ_NATIVE_MAX */
4232 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4233 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4234 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4235 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4236
4237 /* this one allows HPA unlocking but fails IOs on the area */
4238 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4239
4240 /* Devices which report 1 sector over size HPA */
4241 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4242 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4243 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4244
4245 /* Devices which get the IVB wrong */
4246 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4247 /* Maybe we should just blacklist TSSTcorp... */
4248 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4249
4250 /* Devices that do not need bridging limits applied */
4251 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4252
4253 /* Devices which aren't very happy with higher link speeds */
4254 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4255
4256 /*
4257 * Devices which choke on SETXFER. Applies only if both the
4258 * device and controller are SATA.
4259 */
4260 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
4261
4262 /* End Marker */
4263 { }
4264 };
4265
4266 /**
4267 * glob_match - match a text string against a glob-style pattern
4268 * @text: the string to be examined
4269 * @pattern: the glob-style pattern to be matched against
4270 *
4271 * Either/both of text and pattern can be empty strings.
4272 *
4273 * Match text against a glob-style pattern, with wildcards and simple sets:
4274 *
4275 * ? matches any single character.
4276 * * matches any run of characters.
4277 * [xyz] matches a single character from the set: x, y, or z.
4278 * [a-d] matches a single character from the range: a, b, c, or d.
4279 * [a-d0-9] matches a single character from either range.
4280 *
4281 * The special characters ?, [, -, or * can be matched using a set, e.g. [*].
4282 * Behaviour with malformed patterns is undefined, though generally reasonable.
4283 *
4284 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
4285 *
4286 * This function uses one level of recursion per '*' in pattern.
4287 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4288 * this will not cause stack problems for any reasonable use here.
4289 *
4290 * RETURNS:
4291 * 0 on match, 1 otherwise.
4292 */
4293 static int glob_match (const char *text, const char *pattern)
4294 {
4295 do {
4296 /* Match single character or a '?' wildcard */
4297 if (*text == *pattern || *pattern == '?') {
4298 if (!*pattern++)
4299 return 0; /* End of both strings: match */
4300 } else {
4301 /* Match single char against a '[' bracketed ']' pattern set */
4302 if (!*text || *pattern != '[')
4303 break; /* Not a pattern set */
4304 while (*++pattern && *pattern != ']' && *text != *pattern) {
4305 if (*pattern == '-' && *(pattern - 1) != '[')
4306 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4307 ++pattern;
4308 break;
4309 }
4310 }
4311 if (!*pattern || *pattern == ']')
4312 return 1; /* No match */
4313 while (*pattern && *pattern++ != ']');
4314 }
4315 } while (*++text && *pattern);
4316
4317 /* Match any run of chars against a '*' wildcard */
4318 if (*pattern == '*') {
4319 if (!*++pattern)
4320 return 0; /* Match: avoid recursion at end of pattern */
4321 /* Loop to handle additional pattern chars after the wildcard */
4322 while (*text) {
4323 if (glob_match(text, pattern) == 0)
4324 return 0; /* Remainder matched */
4325 ++text; /* Absorb (match) this char and try again */
4326 }
4327 }
4328 if (!*text && !*pattern)
4329 return 0; /* End of both strings: match */
4330 return 1; /* No match */
4331 }
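/*
 * Illustrative sketch (not part of the original file), mirroring how
 * ata_dev_blacklisted() below uses glob_match(): the return value
 * follows strcmp()-style semantics, so 0 means "matched".
 *
 *	if (!glob_match("ST3320613AS", "ST3320[68]13AS"))
 *		pr_info("model matches blacklist pattern\n");
 */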
4332
4333 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4334 {
4335 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4336 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4337 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4338
4339 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4340 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4341
4342 while (ad->model_num) {
4343 if (!glob_match(model_num, ad->model_num)) {
4344 if (ad->model_rev == NULL)
4345 return ad->horkage;
4346 if (!glob_match(model_rev, ad->model_rev))
4347 return ad->horkage;
4348 }
4349 ad++;
4350 }
4351 return 0;
4352 }
4353
4354 static int ata_dma_blacklisted(const struct ata_device *dev)
4355 {
4356 /* We don't support polling DMA.
4357 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
4358 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4359 */
4360 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4361 (dev->flags & ATA_DFLAG_CDB_INTR))
4362 return 1;
4363 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4364 }
4365
4366 /**
4367 * ata_is_40wire - check drive side detection
4368 * @dev: device
4369 *
4370 * Perform drive side detection decoding, allowing for device vendors
4371 * who can't follow the documentation.
4372 */
4373
4374 static int ata_is_40wire(struct ata_device *dev)
4375 {
4376 if (dev->horkage & ATA_HORKAGE_IVB)
4377 return ata_drive_40wire_relaxed(dev->id);
4378 return ata_drive_40wire(dev->id);
4379 }
4380
4381 /**
4382 * cable_is_40wire - 40/80/SATA decider
4383 * @ap: port to consider
4384 *
4385 * This function encapsulates the policy for speed management
4386 * in one place. At the moment we don't cache the result but
4387 * there is a good case for setting ap->cbl to the result when
4388 * we are called with unknown cables (and figuring out if it
4389 * impacts hotplug at all).
4390 *
4391 * Return 1 if the cable appears to be 40 wire.
4392 */
4393
4394 static int cable_is_40wire(struct ata_port *ap)
4395 {
4396 struct ata_link *link;
4397 struct ata_device *dev;
4398
4399 /* If the controller thinks we are 40 wire, we are. */
4400 if (ap->cbl == ATA_CBL_PATA40)
4401 return 1;
4402
4403 /* If the controller thinks we are 80 wire, we are. */
4404 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4405 return 0;
4406
4407 /* If the system is known to use a short 40-wire cable (e.g. a
4408 * laptop), then we allow 80-wire modes even if the drive
4409 * isn't sure.
4410 */
4411 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4412 return 0;
4413
4414 /* If the controller doesn't know, we scan.
4415 *
4416 * Note: We look for all 40 wire detects at this point. Any
4417 * 80 wire detect is taken to be 80 wire cable because
4418 * - in many setups only one drive (the slave, if present) will
4419 * give a valid detect
4420 * - if you have a drive that isn't detect capable you don't want
4421 * it to colour the choice
4422 */
4423 ata_for_each_link(link, ap, EDGE) {
4424 ata_for_each_dev(dev, link, ENABLED) {
4425 if (!ata_is_40wire(dev))
4426 return 0;
4427 }
4428 }
4429 return 1;
4430 }
4431
4432 /**
4433 * ata_dev_xfermask - Compute supported xfermask of the given device
4434 * @dev: Device to compute xfermask for
4435 *
4436 * Compute supported xfermask of @dev and store it in
4437 * dev->*_mask. This function is responsible for applying all
4438 * known limits including host controller limits, device
4439 * blacklist, etc...
4440 *
4441 * LOCKING:
4442 * None.
4443 */
4444 static void ata_dev_xfermask(struct ata_device *dev)
4445 {
4446 struct ata_link *link = dev->link;
4447 struct ata_port *ap = link->ap;
4448 struct ata_host *host = ap->host;
4449 unsigned long xfer_mask;
4450
4451 /* controller modes available */
4452 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4453 ap->mwdma_mask, ap->udma_mask);
4454
4455 /* drive modes available */
4456 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4457 dev->mwdma_mask, dev->udma_mask);
4458 xfer_mask &= ata_id_xfermask(dev->id);
4459
4460 /*
4461 * CFA Advanced TrueIDE timings are not allowed on a shared
4462 * cable
4463 */
4464 if (ata_dev_pair(dev)) {
4465 /* No PIO5 or PIO6 */
4466 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4467 /* No MWDMA3 or MWDMA4 */
4468 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4469 }
4470
4471 if (ata_dma_blacklisted(dev)) {
4472 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4473 ata_dev_printk(dev, KERN_WARNING,
4474 "device is on DMA blacklist, disabling DMA\n");
4475 }
4476
4477 if ((host->flags & ATA_HOST_SIMPLEX) &&
4478 host->simplex_claimed && host->simplex_claimed != ap) {
4479 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4480 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4481 "other device, disabling DMA\n");
4482 }
4483
4484 if (ap->flags & ATA_FLAG_NO_IORDY)
4485 xfer_mask &= ata_pio_mask_no_iordy(dev);
4486
4487 if (ap->ops->mode_filter)
4488 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4489
4490 /* Apply cable rule here. Don't apply it early because when
4491 * we handle hot plug the cable type can itself change.
4492 * Check this last so that we know if the transfer rate was
4493 * solely limited by the cable.
4494 * Unknown or 80 wire cables reported host side are checked
4495 * drive side as well. Cases where we know a 40-wire cable
4496 * is used safely for 80 are not checked here.
4497 */
4498 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4499 /* UDMA/44 or higher would be available */
4500 if (cable_is_40wire(ap)) {
4501 ata_dev_printk(dev, KERN_WARNING,
4502 "limited to UDMA/33 due to 40-wire cable\n");
4503 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4504 }
4505
4506 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4507 &dev->mwdma_mask, &dev->udma_mask);
4508 }
4509
4510 /**
4511 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4512 * @dev: Device to which command will be sent
4513 *
4514 * Issue SET FEATURES - XFER MODE command to device @dev
4515 * on port @ap.
4516 *
4517 * LOCKING:
4518 * PCI/etc. bus probe sem.
4519 *
4520 * RETURNS:
4521 * 0 on success, AC_ERR_* mask otherwise.
4522 */
4523
4524 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4525 {
4526 struct ata_taskfile tf;
4527 unsigned int err_mask;
4528
4529 /* set up set-features taskfile */
4530 DPRINTK("set features - xfer mode\n");
4531
4532 /* Some controllers and ATAPI devices show flaky interrupt
4533 * behavior after setting xfer mode. Use polling instead.
4534 */
4535 ata_tf_init(dev, &tf);
4536 tf.command = ATA_CMD_SET_FEATURES;
4537 tf.feature = SETFEATURES_XFER;
4538 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4539 tf.protocol = ATA_PROT_NODATA;
4540 /* If we are using IORDY we must send the mode setting command */
4541 if (ata_pio_need_iordy(dev))
4542 tf.nsect = dev->xfer_mode;
4543 /* If the device has IORDY and the controller does not - turn it off */
4544 else if (ata_id_has_iordy(dev->id))
4545 tf.nsect = 0x01;
4546 else /* In the ancient relic department - skip all of this */
4547 return 0;
4548
4549 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4550
4551 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4552 return err_mask;
4553 }
4554 /**
4555 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4556 * @dev: Device to which command will be sent
4557 * @enable: Whether to enable or disable the feature
4558 * @feature: feature to set (written to the sector count field)
4559 *
4560 * Issue SET FEATURES - SATA FEATURES command to device @dev
4561 * on port @ap, with the sector count set to @feature.
4562 *
4563 * LOCKING:
4564 * PCI/etc. bus probe sem.
4565 *
4566 * RETURNS:
4567 * 0 on success, AC_ERR_* mask otherwise.
4568 */
4569 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4570 u8 feature)
4571 {
4572 struct ata_taskfile tf;
4573 unsigned int err_mask;
4574
4575 /* set up set-features taskfile */
4576 DPRINTK("set features - SATA features\n");
4577
4578 ata_tf_init(dev, &tf);
4579 tf.command = ATA_CMD_SET_FEATURES;
4580 tf.feature = enable;
4581 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4582 tf.protocol = ATA_PROT_NODATA;
4583 tf.nsect = feature;
4584
4585 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4586
4587 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4588 return err_mask;
4589 }
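/*
 * Illustrative sketch (not part of the original file): how callers
 * typically use ata_dev_set_feature(), e.g. enabling SATA asynchronous
 * notification (constants from <linux/ata.h>).
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			       "failed to enable AN (err_mask=0x%x)\n",
 *			       err_mask);
 */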
4590
4591 /**
4592 * ata_dev_init_params - Issue INIT DEV PARAMS command
4593 * @dev: Device to which command will be sent
4594 * @heads: Number of heads (taskfile parameter)
4595 * @sectors: Number of sectors (taskfile parameter)
4596 *
4597 * LOCKING:
4598 * Kernel thread context (may sleep)
4599 *
4600 * RETURNS:
4601 * 0 on success, AC_ERR_* mask otherwise.
4602 */
4603 static unsigned int ata_dev_init_params(struct ata_device *dev,
4604 u16 heads, u16 sectors)
4605 {
4606 struct ata_taskfile tf;
4607 unsigned int err_mask;
4608
4609 /* Number of sectors per track 1-255. Number of heads 1-16 */
4610 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4611 return AC_ERR_INVALID;
4612
4613 /* set up init dev params taskfile */
4614 DPRINTK("init dev params \n");
4615
4616 ata_tf_init(dev, &tf);
4617 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4618 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4619 tf.protocol = ATA_PROT_NODATA;
4620 tf.nsect = sectors;
4621 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4622
4623 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4624 /* A clean abort indicates an original or just-out-of-spec drive,
4625 and we should continue as we issue the setup based on the
4626 drive's reported working geometry */
4627 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4628 err_mask = 0;
4629
4630 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4631 return err_mask;
4632 }
4633
4634 /**
4635 * ata_sg_clean - Unmap DMA memory associated with command
4636 * @qc: Command containing DMA memory to be released
4637 *
4638 * Unmap all mapped DMA memory associated with this command.
4639 *
4640 * LOCKING:
4641 * spin_lock_irqsave(host lock)
4642 */
4643 void ata_sg_clean(struct ata_queued_cmd *qc)
4644 {
4645 struct ata_port *ap = qc->ap;
4646 struct scatterlist *sg = qc->sg;
4647 int dir = qc->dma_dir;
4648
4649 WARN_ON_ONCE(sg == NULL);
4650
4651 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4652
4653 if (qc->n_elem)
4654 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4655
4656 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4657 qc->sg = NULL;
4658 }
4659
4660 /**
4661 * atapi_check_dma - Check whether ATAPI DMA can be supported
4662 * @qc: Metadata associated with taskfile to check
4663 *
4664 * Allow low-level driver to filter ATA PACKET commands, returning
4665 * a status indicating whether or not it is OK to use DMA for the
4666 * supplied PACKET command.
4667 *
4668 * LOCKING:
4669 * spin_lock_irqsave(host lock)
4670 *
4671 * RETURNS: 0 when ATAPI DMA can be used
4672 * nonzero otherwise
4673 */
4674 int atapi_check_dma(struct ata_queued_cmd *qc)
4675 {
4676 struct ata_port *ap = qc->ap;
4677
4678 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4679 * few ATAPI devices choke on such DMA requests.
4680 */
4681 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4682 unlikely(qc->nbytes & 15))
4683 return 1;
4684
4685 if (ap->ops->check_atapi_dma)
4686 return ap->ops->check_atapi_dma(qc);
4687
4688 return 0;
4689 }
4690
4691 /**
4692 * ata_std_qc_defer - Check whether a qc needs to be deferred
4693 * @qc: ATA command in question
4694 *
4695 * Non-NCQ commands cannot run with any other command, NCQ or
4696 * not. As the upper layer only knows the queue depth, we are
4697 * responsible for maintaining exclusion. This function checks
4698 * whether a new command @qc can be issued.
4699 *
4700 * LOCKING:
4701 * spin_lock_irqsave(host lock)
4702 *
4703 * RETURNS:
4704 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4705 */
4706 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4707 {
4708 struct ata_link *link = qc->dev->link;
4709
4710 if (qc->tf.protocol == ATA_PROT_NCQ) {
4711 if (!ata_tag_valid(link->active_tag))
4712 return 0;
4713 } else {
4714 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4715 return 0;
4716 }
4717
4718 return ATA_DEFER_LINK;
4719 }
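/*
 * Illustrative sketch (not part of the original file): an LLD with
 * additional issue constraints would typically chain to
 * ata_std_qc_defer() after its own checks; my_controller_busy() is a
 * hypothetical per-driver test.
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_controller_busy(qc->ap))
 *			return ATA_DEFER_PORT;
 *		return ata_std_qc_defer(qc);
 *	}
 */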
4720
4721 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4722
4723 /**
4724 * ata_sg_init - Associate command with scatter-gather table.
4725 * @qc: Command to be associated
4726 * @sg: Scatter-gather table.
4727 * @n_elem: Number of elements in s/g table.
4728 *
4729 * Initialize the data-related elements of queued_cmd @qc
4730 * to point to a scatter-gather table @sg, containing @n_elem
4731 * elements.
4732 *
4733 * LOCKING:
4734 * spin_lock_irqsave(host lock)
4735 */
4736 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4737 unsigned int n_elem)
4738 {
4739 qc->sg = sg;
4740 qc->n_elem = n_elem;
4741 qc->cursg = qc->sg;
4742 }
4743
4744 /**
4745 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4746 * @qc: Command with scatter-gather table to be mapped.
4747 *
4748 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4749 *
4750 * LOCKING:
4751 * spin_lock_irqsave(host lock)
4752 *
4753 * RETURNS:
4754 * Zero on success, negative on error.
4755 *
4756 */
4757 static int ata_sg_setup(struct ata_queued_cmd *qc)
4758 {
4759 struct ata_port *ap = qc->ap;
4760 unsigned int n_elem;
4761
4762 VPRINTK("ENTER, ata%u\n", ap->print_id);
4763
4764 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4765 if (n_elem < 1)
4766 return -1;
4767
4768 DPRINTK("%d sg elements mapped\n", n_elem);
4769 qc->orig_n_elem = qc->n_elem;
4770 qc->n_elem = n_elem;
4771 qc->flags |= ATA_QCFLAG_DMAMAP;
4772
4773 return 0;
4774 }
4775
4776 /**
4777 * swap_buf_le16 - swap halves of 16-bit words in place
4778 * @buf: Buffer to swap
4779 * @buf_words: Number of 16-bit words in buffer.
4780 *
4781 * Swap halves of 16-bit words if needed to convert from
4782 * little-endian byte order to native cpu byte order, or
4783 * vice-versa.
4784 *
4785 * LOCKING:
4786 * Inherited from caller.
4787 */
4788 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4789 {
4790 #ifdef __BIG_ENDIAN
4791 unsigned int i;
4792
4793 for (i = 0; i < buf_words; i++)
4794 buf[i] = le16_to_cpu(buf[i]);
4795 #endif /* __BIG_ENDIAN */
4796 }
4797
4798 /**
4799 * ata_qc_new - Request an available ATA command, for queueing
4800 * @ap: target port
4801 *
4802 * LOCKING:
4803 * None.
4804 */
4805
4806 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4807 {
4808 struct ata_queued_cmd *qc = NULL;
4809 unsigned int i;
4810
4811 /* no command while frozen */
4812 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4813 return NULL;
4814
4815 /* the last tag is reserved for internal command. */
4816 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4817 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4818 qc = __ata_qc_from_tag(ap, i);
4819 break;
4820 }
4821
4822 if (qc)
4823 qc->tag = i;
4824
4825 return qc;
4826 }
4827
4828 /**
4829 * ata_qc_new_init - Request an available ATA command, and initialize it
4830 * @dev: Device from whom we request an available command structure
4831 *
4832 * LOCKING:
4833 * None.
4834 */
4835
4836 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4837 {
4838 struct ata_port *ap = dev->link->ap;
4839 struct ata_queued_cmd *qc;
4840
4841 qc = ata_qc_new(ap);
4842 if (qc) {
4843 qc->scsicmd = NULL;
4844 qc->ap = ap;
4845 qc->dev = dev;
4846
4847 ata_qc_reinit(qc);
4848 }
4849
4850 return qc;
4851 }
4852
4853 /**
4854 * ata_qc_free - free unused ata_queued_cmd
4855 * @qc: Command to complete
4856 *
4857 * Designed to free an unused ata_queued_cmd object
4858 * in case something prevents using it.
4859 *
4860 * LOCKING:
4861 * spin_lock_irqsave(host lock)
4862 */
4863 void ata_qc_free(struct ata_queued_cmd *qc)
4864 {
4865 struct ata_port *ap;
4866 unsigned int tag;
4867
4868 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4869 ap = qc->ap;
4870
4871 qc->flags = 0;
4872 tag = qc->tag;
4873 if (likely(ata_tag_valid(tag))) {
4874 qc->tag = ATA_TAG_POISON;
4875 clear_bit(tag, &ap->qc_allocated);
4876 }
4877 }
4878
4879 void __ata_qc_complete(struct ata_queued_cmd *qc)
4880 {
4881 struct ata_port *ap;
4882 struct ata_link *link;
4883
4884 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4885 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4886 ap = qc->ap;
4887 link = qc->dev->link;
4888
4889 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4890 ata_sg_clean(qc);
4891
4892 /* command should be marked inactive atomically with qc completion */
4893 if (qc->tf.protocol == ATA_PROT_NCQ) {
4894 link->sactive &= ~(1 << qc->tag);
4895 if (!link->sactive)
4896 ap->nr_active_links--;
4897 } else {
4898 link->active_tag = ATA_TAG_POISON;
4899 ap->nr_active_links--;
4900 }
4901
4902 /* clear exclusive status */
4903 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4904 ap->excl_link == link))
4905 ap->excl_link = NULL;
4906
4907 /* atapi: mark qc as inactive to prevent the interrupt handler
4908 * from completing the command twice later, before the error handler
4909 * is called. (when rc != 0 and atapi request sense is needed)
4910 */
4911 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4912 ap->qc_active &= ~(1 << qc->tag);
4913
4914 /* call completion callback */
4915 qc->complete_fn(qc);
4916 }
4917
4918 static void fill_result_tf(struct ata_queued_cmd *qc)
4919 {
4920 struct ata_port *ap = qc->ap;
4921
4922 qc->result_tf.flags = qc->tf.flags;
4923 ap->ops->qc_fill_rtf(qc);
4924 }
4925
4926 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4927 {
4928 struct ata_device *dev = qc->dev;
4929
4930 if (ata_tag_internal(qc->tag))
4931 return;
4932
4933 if (ata_is_nodata(qc->tf.protocol))
4934 return;
4935
4936 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4937 return;
4938
4939 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4940 }
4941
4942 /**
4943 * ata_qc_complete - Complete an active ATA command
4944 * @qc: Command to complete
4945 *
4946 * Indicate to the mid and upper layers that an ATA
4947 * command has completed, with either an ok or not-ok status.
4948 *
4949 * LOCKING:
4950 * spin_lock_irqsave(host lock)
4951 */
4952 void ata_qc_complete(struct ata_queued_cmd *qc)
4953 {
4954 struct ata_port *ap = qc->ap;
4955
4956 /* XXX: New EH and old EH use different mechanisms to
4957 * synchronize EH with regular execution path.
4958 *
4959 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4960 * Normal execution path is responsible for not accessing a
4961 * failed qc. libata core enforces the rule by returning NULL
4962 * from ata_qc_from_tag() for failed qcs.
4963 *
4964 * Old EH depends on ata_qc_complete() nullifying completion
4965 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4966 * not synchronize with interrupt handler. Only PIO task is
4967 * taken care of.
4968 */
4969 if (ap->ops->error_handler) {
4970 struct ata_device *dev = qc->dev;
4971 struct ata_eh_info *ehi = &dev->link->eh_info;
4972
4973 if (unlikely(qc->err_mask))
4974 qc->flags |= ATA_QCFLAG_FAILED;
4975
4976 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4977 /* always fill result TF for failed qc */
4978 fill_result_tf(qc);
4979
4980 if (!ata_tag_internal(qc->tag))
4981 ata_qc_schedule_eh(qc);
4982 else
4983 __ata_qc_complete(qc);
4984 return;
4985 }
4986
4987 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4988
4989 /* read result TF if requested */
4990 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4991 fill_result_tf(qc);
4992
4993 /* Some commands need post-processing after successful
4994 * completion.
4995 */
4996 switch (qc->tf.command) {
4997 case ATA_CMD_SET_FEATURES:
4998 if (qc->tf.feature != SETFEATURES_WC_ON &&
4999 qc->tf.feature != SETFEATURES_WC_OFF)
5000 break;
5001 /* fall through */
5002 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5003 case ATA_CMD_SET_MULTI: /* multi_count changed */
5004 /* revalidate device */
5005 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5006 ata_port_schedule_eh(ap);
5007 break;
5008
5009 case ATA_CMD_SLEEP:
5010 dev->flags |= ATA_DFLAG_SLEEPING;
5011 break;
5012 }
5013
5014 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5015 ata_verify_xfer(qc);
5016
5017 __ata_qc_complete(qc);
5018 } else {
5019 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5020 return;
5021
5022 /* read result TF if failed or requested */
5023 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5024 fill_result_tf(qc);
5025
5026 __ata_qc_complete(qc);
5027 }
5028 }
5029
5030 /**
5031 * ata_qc_complete_multiple - Complete multiple qcs successfully
5032 * @ap: port in question
5033 * @qc_active: new qc_active mask
5034 *
5035 * Complete in-flight commands. This function is meant to be
5036 * called from the low-level driver's interrupt routine to complete
5037 * requests normally. ap->qc_active and @qc_active are compared
5038 * and commands are completed accordingly.
5039 *
5040 * LOCKING:
5041 * spin_lock_irqsave(host lock)
5042 *
5043 * RETURNS:
5044 * Number of completed commands on success, -errno otherwise.
5045 */
5046 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5047 {
5048 int nr_done = 0;
5049 u32 done_mask;
5050
5051 done_mask = ap->qc_active ^ qc_active;
5052
5053 if (unlikely(done_mask & qc_active)) {
5054 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5055 "(%08x->%08x)\n", ap->qc_active, qc_active);
5056 return -EINVAL;
5057 }
5058
5059 while (done_mask) {
5060 struct ata_queued_cmd *qc;
5061 unsigned int tag = __ffs(done_mask);
5062
5063 qc = ata_qc_from_tag(ap, tag);
5064 if (qc) {
5065 ata_qc_complete(qc);
5066 nr_done++;
5067 }
5068 done_mask &= ~(1 << tag);
5069 }
5070
5071 return nr_done;
5072 }
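/*
 * Illustrative sketch (not part of the original file): a low-level
 * driver's interrupt handler typically derives the still-active tag
 * mask from controller registers and passes it here; MY_ACTIVE_REG
 * and port_mmio are hypothetical.
 *
 *	u32 qc_active = readl(port_mmio + MY_ACTIVE_REG);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */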
5073
5074 /**
5075 * ata_qc_issue - issue taskfile to device
5076 * @qc: command to issue to device
5077 *
5078 * Prepare an ATA command for submission to the device.
5079 * This includes mapping the data into a DMA-able
5080 * area, filling in the S/G table, and finally
5081 * writing the taskfile to hardware, starting the command.
5082 *
5083 * LOCKING:
5084 * spin_lock_irqsave(host lock)
5085 */
5086 void ata_qc_issue(struct ata_queued_cmd *qc)
5087 {
5088 struct ata_port *ap = qc->ap;
5089 struct ata_link *link = qc->dev->link;
5090 u8 prot = qc->tf.protocol;
5091
5092 /* Make sure only one non-NCQ command is outstanding. The
5093 * check is skipped for old EH because it reuses active qc to
5094 * request ATAPI sense.
5095 */
5096 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5097
5098 if (ata_is_ncq(prot)) {
5099 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5100
5101 if (!link->sactive)
5102 ap->nr_active_links++;
5103 link->sactive |= 1 << qc->tag;
5104 } else {
5105 WARN_ON_ONCE(link->sactive);
5106
5107 ap->nr_active_links++;
5108 link->active_tag = qc->tag;
5109 }
5110
5111 qc->flags |= ATA_QCFLAG_ACTIVE;
5112 ap->qc_active |= 1 << qc->tag;
5113
5114 /*
5115 * We guarantee to LLDs that they will have at least one
5116 * non-zero sg if the command is a data command.
5117 */
5118 if (WARN_ON_ONCE(ata_is_data(prot) &&
5119 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5120 goto sys_err;
5121
5122 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5123 (ap->flags & ATA_FLAG_PIO_DMA)))
5124 if (ata_sg_setup(qc))
5125 goto sys_err;
5126
5127 /* if device is sleeping, schedule reset and abort the link */
5128 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5129 link->eh_info.action |= ATA_EH_RESET;
5130 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5131 ata_link_abort(link);
5132 return;
5133 }
5134
5135 ap->ops->qc_prep(qc);
5136
5137 qc->err_mask |= ap->ops->qc_issue(qc);
5138 if (unlikely(qc->err_mask))
5139 goto err;
5140 return;
5141
5142 sys_err:
5143 qc->err_mask |= AC_ERR_SYSTEM;
5144 err:
5145 ata_qc_complete(qc);
5146 }
5147
5148 /**
5149 * sata_scr_valid - test whether SCRs are accessible
5150 * @link: ATA link to test SCR accessibility for
5151 *
5152 * Test whether SCRs are accessible for @link.
5153 *
5154 * LOCKING:
5155 * None.
5156 *
5157 * RETURNS:
5158 * 1 if SCRs are accessible, 0 otherwise.
5159 */
5160 int sata_scr_valid(struct ata_link *link)
5161 {
5162 struct ata_port *ap = link->ap;
5163
5164 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5165 }
5166
5167 /**
5168 * sata_scr_read - read SCR register of the specified port
5169 * @link: ATA link to read SCR for
5170 * @reg: SCR to read
5171 * @val: Place to store read value
5172 *
5173 * Read SCR register @reg of @link into *@val. This function is
5174 * guaranteed to succeed if @link is ap->link, the cable type of
5175 * the port is SATA and the port implements ->scr_read.
5176 *
5177 * LOCKING:
5178 * None if @link is ap->link. Kernel thread context otherwise.
5179 *
5180 * RETURNS:
5181 * 0 on success, negative errno on failure.
5182 */
5183 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5184 {
5185 if (ata_is_host_link(link)) {
5186 if (sata_scr_valid(link))
5187 return link->ap->ops->scr_read(link, reg, val);
5188 return -EOPNOTSUPP;
5189 }
5190
5191 return sata_pmp_scr_read(link, reg, val);
5192 }
5193
5194 /**
5195 * sata_scr_write - write SCR register of the specified port
5196 * @link: ATA link to write SCR for
5197 * @reg: SCR to write
5198 * @val: value to write
5199 *
5200 * Write @val to SCR register @reg of @link. This function is
5201 * guaranteed to succeed if @link is ap->link, the cable type of
5202 * the port is SATA and the port implements ->scr_read.
5203 *
5204 * LOCKING:
5205 * None if @link is ap->link. Kernel thread context otherwise.
5206 *
5207 * RETURNS:
5208 * 0 on success, negative errno on failure.
5209 */
5210 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5211 {
5212 if (ata_is_host_link(link)) {
5213 if (sata_scr_valid(link))
5214 return link->ap->ops->scr_write(link, reg, val);
5215 return -EOPNOTSUPP;
5216 }
5217
5218 return sata_pmp_scr_write(link, reg, val);
5219 }
5220
5221 /**
5222 * sata_scr_write_flush - write SCR register of the specified port and flush
5223 * @link: ATA link to write SCR for
5224 * @reg: SCR to write
5225 * @val: value to write
5226 *
5227 * This function is identical to sata_scr_write() except that this
5228 * function performs a flush after writing to the register.
5229 *
5230 * LOCKING:
5231 * None if @link is ap->link. Kernel thread context otherwise.
5232 *
5233 * RETURNS:
5234 * 0 on success, negative errno on failure.
5235 */
5236 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5237 {
5238 if (ata_is_host_link(link)) {
5239 int rc;
5240
5241 if (sata_scr_valid(link)) {
5242 rc = link->ap->ops->scr_write(link, reg, val);
5243 if (rc == 0)
5244 rc = link->ap->ops->scr_read(link, reg, &val);
5245 return rc;
5246 }
5247 return -EOPNOTSUPP;
5248 }
5249
5250 return sata_pmp_scr_write(link, reg, val);
5251 }
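/*
 * Illustrative sketch (not part of the original file): a common SCR
 * access pattern, clearing SError after a reset the same way
 * ata_std_postreset() does earlier in this file.
 *
 *	u32 serror;
 *
 *	if (!sata_scr_read(link, SCR_ERROR, &serror))
 *		sata_scr_write(link, SCR_ERROR, serror);
 */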
5252
5253 /**
5254 * ata_phys_link_online - test whether the given link is online
5255 * @link: ATA link to test
5256 *
5257 * Test whether @link is online. Note that this function returns
5258 * 0 if online status of @link cannot be obtained, so
5259 * ata_link_online(link) != !ata_link_offline(link).
5260 *
5261 * LOCKING:
5262 * None.
5263 *
5264 * RETURNS:
5265 * True if the port online status is available and online.
5266 */
5267 bool ata_phys_link_online(struct ata_link *link)
5268 {
5269 u32 sstatus;
5270
5271 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5272 ata_sstatus_online(sstatus))
5273 return true;
5274 return false;
5275 }
5276
5277 /**
5278 * ata_phys_link_offline - test whether the given link is offline
5279 * @link: ATA link to test
5280 *
5281 * Test whether @link is offline. Note that this function
5282 * returns 0 if offline status of @link cannot be obtained, so
5283 * ata_link_online(link) != !ata_link_offline(link).
5284 *
5285 * LOCKING:
5286 * None.
5287 *
5288 * RETURNS:
5289 * True if the port offline status is available and offline.
5290 */
5291 bool ata_phys_link_offline(struct ata_link *link)
5292 {
5293 u32 sstatus;
5294
5295 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5296 !ata_sstatus_online(sstatus))
5297 return true;
5298 return false;
5299 }
5300
5301 /**
5302 * ata_link_online - test whether the given link is online
5303 * @link: ATA link to test
5304 *
5305 * Test whether @link is online. This is identical to
5306 * ata_phys_link_online() when there's no slave link. When
5307 * there's a slave link, this function should only be called on
5308 * the master link and will return true if any of M/S links is
5309 * online.
5310 *
5311 * LOCKING:
5312 * None.
5313 *
5314 * RETURNS:
5315 * True if the port online status is available and online.
5316 */
5317 bool ata_link_online(struct ata_link *link)
5318 {
5319 struct ata_link *slave = link->ap->slave_link;
5320
5321 WARN_ON(link == slave); /* shouldn't be called on slave link */
5322
5323 return ata_phys_link_online(link) ||
5324 (slave && ata_phys_link_online(slave));
5325 }
5326
5327 /**
5328 * ata_link_offline - test whether the given link is offline
5329 * @link: ATA link to test
5330 *
5331 * Test whether @link is offline. This is identical to
5332 * ata_phys_link_offline() when there's no slave link. When
5333 * there's a slave link, this function should only be called on
5334 * the master link and will return true if both M/S links are
5335 * offline.
5336 *
5337 * LOCKING:
5338 * None.
5339 *
5340 * RETURNS:
5341 * True if the port offline status is available and offline.
5342 */
5343 bool ata_link_offline(struct ata_link *link)
5344 {
5345 struct ata_link *slave = link->ap->slave_link;
5346
5347 WARN_ON(link == slave); /* shouldn't be called on slave link */
5348
5349 return ata_phys_link_offline(link) &&
5350 (!slave || ata_phys_link_offline(slave));
5351 }
5352
5353 #ifdef CONFIG_PM
5354 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5355 unsigned int action, unsigned int ehi_flags,
5356 int wait)
5357 {
5358 unsigned long flags;
5359 int i, rc;
5360
5361 for (i = 0; i < host->n_ports; i++) {
5362 struct ata_port *ap = host->ports[i];
5363 struct ata_link *link;
5364
5365 /* Previous resume operation might still be in
5366 * progress. Wait for PM_PENDING to clear.
5367 */
5368 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5369 ata_port_wait_eh(ap);
5370 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5371 }
5372
5373 /* request PM ops to EH */
5374 spin_lock_irqsave(ap->lock, flags);
5375
5376 ap->pm_mesg = mesg;
5377 if (wait) {
5378 rc = 0;
5379 ap->pm_result = &rc;
5380 }
5381
5382 ap->pflags |= ATA_PFLAG_PM_PENDING;
5383 ata_for_each_link(link, ap, HOST_FIRST) {
5384 link->eh_info.action |= action;
5385 link->eh_info.flags |= ehi_flags;
5386 }
5387
5388 ata_port_schedule_eh(ap);
5389
5390 spin_unlock_irqrestore(ap->lock, flags);
5391
5392 /* wait and check result */
5393 if (wait) {
5394 ata_port_wait_eh(ap);
5395 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5396 if (rc)
5397 return rc;
5398 }
5399 }
5400
5401 return 0;
5402 }
5403
5404 /**
5405 * ata_host_suspend - suspend host
5406 * @host: host to suspend
5407 * @mesg: PM message
5408 *
5409 * Suspend @host. Actual operation is performed by EH. This
5410 * function requests EH to perform PM operations and waits for EH
5411 * to finish.
5412 *
5413 * LOCKING:
5414 * Kernel thread context (may sleep).
5415 *
5416 * RETURNS:
5417 * 0 on success, -errno on failure.
5418 */
5419 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5420 {
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5422 int rc;
5423
5424 /*
5425 * disable link pm on all ports before requesting
5426 * any pm activity
5427 */
5428 ata_lpm_enable(host);
5429
5430 /*
5431 * On some hardware, the device fails to respond after being spun
5432 * down for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5442 if (rc == 0)
5443 host->dev->power.power_state = mesg;
5444 return rc;
5445 }
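/*
 * Illustrative sketch (not part of the original file): bus glue, such
 * as a PCI suspend helper, typically wraps this as
 *
 *	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *	int rc;
 *
 *	rc = ata_host_suspend(host, mesg);
 *	if (rc)
 *		return rc;
 */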
5446
5447 /**
5448 * ata_host_resume - resume host
5449 * @host: host to resume
5450 *
5451 * Resume @host. Actual operation is performed by EH. This
5452 * function requests EH to perform PM operations and returns.
5453 * Note that all resume operations are performed in parallel.
5454 *
5455 * LOCKING:
5456 * Kernel thread context (may sleep).
5457 */
5458 void ata_host_resume(struct ata_host *host)
5459 {
5460 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5461 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5462 host->dev->power.power_state = PMSG_ON;
5463
5464 /* reenable link pm */
5465 ata_lpm_disable(host);
5466 }
5467 #endif
5468
5469 /**
5470 * ata_dev_init - Initialize an ata_device structure
5471 * @dev: Device structure to initialize
5472 *
5473 * Initialize @dev in preparation for probing.
5474 *
5475 * LOCKING:
5476 * Inherited from caller.
5477 */
5478 void ata_dev_init(struct ata_device *dev)
5479 {
5480 struct ata_link *link = ata_dev_phys_link(dev);
5481 struct ata_port *ap = link->ap;
5482 unsigned long flags;
5483
5484 /* SATA spd limit is bound to the attached device, reset together */
5485 link->sata_spd_limit = link->hw_sata_spd_limit;
5486 link->sata_spd = 0;
5487
5488 /* High bits of dev->flags are used to record warm plug
5489 * requests which occur asynchronously. Synchronize using
5490 * host lock.
5491 */
5492 spin_lock_irqsave(ap->lock, flags);
5493 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5494 dev->horkage = 0;
5495 spin_unlock_irqrestore(ap->lock, flags);
5496
5497 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5498 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5499 dev->pio_mask = UINT_MAX;
5500 dev->mwdma_mask = UINT_MAX;
5501 dev->udma_mask = UINT_MAX;
5502 }
5503
5504 /**
5505 * ata_link_init - Initialize an ata_link structure
5506 * @ap: ATA port link is attached to
5507 * @link: Link structure to initialize
5508 * @pmp: Port multiplier port number
5509 *
5510 * Initialize @link.
5511 *
5512 * LOCKING:
5513 * Kernel thread context (may sleep)
5514 */
5515 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5516 {
5517 int i;
5518
5519 /* clear everything except for devices */
5520 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5521 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5522
5523 link->ap = ap;
5524 link->pmp = pmp;
5525 link->active_tag = ATA_TAG_POISON;
5526 link->hw_sata_spd_limit = UINT_MAX;
5527
5528 /* can't use iterator, ap isn't initialized yet */
5529 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5530 struct ata_device *dev = &link->device[i];
5531
5532 dev->link = link;
5533 dev->devno = dev - link->device;
5534 #ifdef CONFIG_ATA_ACPI
5535 dev->gtf_filter = ata_acpi_gtf_filter;
5536 #endif
5537 ata_dev_init(dev);
5538 }
5539 }
5540
5541 /**
5542 * sata_link_init_spd - Initialize link->sata_spd_limit
5543 * @link: Link to configure sata_spd_limit for
5544 *
5545 * Initialize @link->[hw_]sata_spd_limit to the currently
5546 * configured value.
5547 *
5548 * LOCKING:
5549 * Kernel thread context (may sleep).
5550 *
5551 * RETURNS:
5552 * 0 on success, -errno on failure.
5553 */
5554 int sata_link_init_spd(struct ata_link *link)
5555 {
5556 u8 spd;
5557 int rc;
5558
5559 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5560 if (rc)
5561 return rc;
5562
5563 spd = (link->saved_scontrol >> 4) & 0xf;
5564 if (spd)
5565 link->hw_sata_spd_limit &= (1 << spd) - 1;
5566
5567 ata_force_link_limits(link);
5568
5569 link->sata_spd_limit = link->hw_sata_spd_limit;
5570
5571 return 0;
5572 }
5573
5574 /**
5575 * ata_port_alloc - allocate and initialize basic ATA port resources
5576 * @host: ATA host this allocated port belongs to
5577 *
5578 * Allocate and initialize basic ATA port resources.
5579 *
5580 * RETURNS:
5581 * Allocated ATA port on success, NULL on failure.
5582 *
5583 * LOCKING:
5584 * Inherited from calling layer (may sleep).
5585 */
5586 struct ata_port *ata_port_alloc(struct ata_host *host)
5587 {
5588 struct ata_port *ap;
5589
5590 DPRINTK("ENTER\n");
5591
5592 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5593 if (!ap)
5594 return NULL;
5595
5596 ap->pflags |= ATA_PFLAG_INITIALIZING;
5597 ap->lock = &host->lock;
5598 ap->print_id = -1;
5599 ap->host = host;
5600 ap->dev = host->dev;
5601
5602 #if defined(ATA_VERBOSE_DEBUG)
5603 /* turn on all debugging levels */
5604 ap->msg_enable = 0x00FF;
5605 #elif defined(ATA_DEBUG)
5606 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5607 #else
5608 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5609 #endif
5610
5611 mutex_init(&ap->scsi_scan_mutex);
5612 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5613 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5614 INIT_LIST_HEAD(&ap->eh_done_q);
5615 init_waitqueue_head(&ap->eh_wait_q);
5616 init_completion(&ap->park_req_pending);
5617 init_timer_deferrable(&ap->fastdrain_timer);
5618 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5619 ap->fastdrain_timer.data = (unsigned long)ap;
5620
5621 ap->cbl = ATA_CBL_NONE;
5622
5623 ata_link_init(ap, &ap->link, 0);
5624
5625 #ifdef ATA_IRQ_TRAP
5626 ap->stats.unhandled_irq = 1;
5627 ap->stats.idle_irq = 1;
5628 #endif
5629 ata_sff_port_init(ap);
5630
5631 return ap;
5632 }
5633
5634 static void ata_host_release(struct device *gendev, void *res)
5635 {
5636 struct ata_host *host = dev_get_drvdata(gendev);
5637 int i;
5638
5639 for (i = 0; i < host->n_ports; i++) {
5640 struct ata_port *ap = host->ports[i];
5641
5642 if (!ap)
5643 continue;
5644
5645 if (ap->scsi_host)
5646 scsi_host_put(ap->scsi_host);
5647
5648 kfree(ap->pmp_link);
5649 kfree(ap->slave_link);
5650 kfree(ap);
5651 host->ports[i] = NULL;
5652 }
5653
5654 dev_set_drvdata(gendev, NULL);
5655 }
5656
5657 /**
5658 * ata_host_alloc - allocate and init basic ATA host resources
5659 * @dev: generic device this host is associated with
5660 * @max_ports: maximum number of ATA ports associated with this host
5661 *
5662 * Allocate and initialize basic ATA host resources. An LLD calls
5663 * this function to allocate a host, then initializes it fully and
5664 * attaches it using ata_host_register().
5665 *
5666 * @max_ports ports are allocated and host->n_ports is
5667 * initialized to @max_ports. The caller is allowed to decrease
5668 * host->n_ports before calling ata_host_register(). The unused
5669 * ports will be automatically freed on registration.
5670 *
5671 * RETURNS:
5672 * Allocated ATA host on success, NULL on failure.
5673 *
5674 * LOCKING:
5675 * Inherited from calling layer (may sleep).
5676 */
5677 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5678 {
5679 struct ata_host *host;
5680 size_t sz;
5681 int i;
5682
5683 DPRINTK("ENTER\n");
5684
5685 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5686 return NULL;
5687
5688 /* alloc a container for our list of ATA ports (buses) */
5689 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5690
5691 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5692 if (!host)
5693 goto err_out;
5694
5695 devres_add(dev, host);
5696 dev_set_drvdata(dev, host);
5697
5698 spin_lock_init(&host->lock);
5699 host->dev = dev;
5700 host->n_ports = max_ports;
5701
5702 /* allocate ports bound to this host */
5703 for (i = 0; i < max_ports; i++) {
5704 struct ata_port *ap;
5705
5706 ap = ata_port_alloc(host);
5707 if (!ap)
5708 goto err_out;
5709
5710 ap->port_no = i;
5711 host->ports[i] = ap;
5712 }
5713
5714 devres_remove_group(dev, NULL);
5715 return host;
5716
5717 err_out:
5718 devres_release_group(dev, NULL);
5719 return NULL;
5720 }
5721
5722 /**
5723 * ata_host_alloc_pinfo - alloc host and init with port_info array
5724 * @dev: generic device this host is associated with
5725 * @ppi: array of ATA port_info to initialize host with
5726 * @n_ports: number of ATA ports attached to this host
5727 *
5728 * Allocate ATA host and initialize with info from @ppi. If NULL
5729 * terminated, @ppi may contain fewer entries than @n_ports. The
5730 * last entry will be used for the remaining ports.
5731 *
5732 * RETURNS:
5733 * Allocated ATA host on success, NULL on failure.
5734 *
5735 * LOCKING:
5736 * Inherited from calling layer (may sleep).
5737 */
5738 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5739 const struct ata_port_info * const * ppi,
5740 int n_ports)
5741 {
5742 const struct ata_port_info *pi;
5743 struct ata_host *host;
5744 int i, j;
5745
5746 host = ata_host_alloc(dev, n_ports);
5747 if (!host)
5748 return NULL;
5749
5750 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5751 struct ata_port *ap = host->ports[i];
5752
5753 if (ppi[j])
5754 pi = ppi[j++];
5755
5756 ap->pio_mask = pi->pio_mask;
5757 ap->mwdma_mask = pi->mwdma_mask;
5758 ap->udma_mask = pi->udma_mask;
5759 ap->flags |= pi->flags;
5760 ap->link.flags |= pi->link_flags;
5761 ap->ops = pi->port_ops;
5762
5763 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5764 host->ops = pi->port_ops;
5765 }
5766
5767 return host;
5768 }
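/*
 * Illustrative sketch (not part of the original file): the usual LLD
 * probe flow built on the helpers above.  my_port_info and the
 * surrounding error handling are hypothetical; registration via
 * ata_host_register()/ata_host_activate() follows afterwards.
 *
 *	static const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *	int rc;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 */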
5769
5770 /**
5771 * ata_slave_link_init - initialize slave link
5772 * @ap: port to initialize slave link for
5773 *
5774 * Create and initialize slave link for @ap. This enables slave
5775 * link handling on the port.
5776 *
5777 * In libata, a port contains links and a link contains devices.
5778 * There is a single host link, but if a PMP is attached to it,
5779 * there can be multiple fan-out links. On SATA, there's usually
5780 * a single device connected to a link, but PATA and SATA
5781 * controllers emulating a TF-based interface can have two: master
5782 * and slave.
5783 *
5784 * However, there are a few controllers which don't fit into this
5785 * abstraction too well - SATA controllers which emulate TF
5786 * interface with both master and slave devices but also have
5787 * separate SCR register sets for each device. These controllers
5788 * need separate links for physical link handling
5789 * (e.g. onlineness, link speed) but should be treated like a
5790 * traditional M/S controller for everything else (e.g. command
5791 * issue, softreset).
5792 *
5793 * slave_link is libata's way of handling this class of
5794 * controllers without impacting core layer too much. For
5795 * anything other than physical link handling, the default host
5796 * link is used for both master and slave. For physical link
5797 * handling, separate @ap->slave_link is used. All dirty details
5798 * are implemented inside libata core layer. From LLD's POV, the
5799 * only difference is that prereset, hardreset and postreset are
5800 * called once more for the slave link, so the reset sequence
5801 * looks like the following.
5802 *
5803 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5804 * softreset(M) -> postreset(M) -> postreset(S)
5805 *
5806 * Note that softreset is called only for the master. Softreset
5807 * resets both M/S by definition, so SRST on master should handle
5808 * both (the standard method will work just fine).
5809 *
5810 * LOCKING:
5811 * Should be called before host is registered.
5812 *
5813 * RETURNS:
5814 * 0 on success, -errno on failure.
5815 */
5816 int ata_slave_link_init(struct ata_port *ap)
5817 {
5818 struct ata_link *link;
5819
5820 WARN_ON(ap->slave_link);
5821 WARN_ON(ap->flags & ATA_FLAG_PMP);
5822
5823 link = kzalloc(sizeof(*link), GFP_KERNEL);
5824 if (!link)
5825 return -ENOMEM;
5826
5827 ata_link_init(ap, link, 1);
5828 ap->slave_link = link;
5829 return 0;
5830 }
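/*
 * Illustrative sketch under the assumptions above (a hypothetical LLD whose
 * hardware emulates M/S but has per-device SCRs): such a driver would ask
 * for slave links while preparing its ports, before registering the host:
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 *
 * followed by the usual ata_host_start()/ata_host_register() sequence.
 */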
5831
5832 static void ata_host_stop(struct device *gendev, void *res)
5833 {
5834 struct ata_host *host = dev_get_drvdata(gendev);
5835 int i;
5836
5837 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5838
5839 for (i = 0; i < host->n_ports; i++) {
5840 struct ata_port *ap = host->ports[i];
5841
5842 if (ap->ops->port_stop)
5843 ap->ops->port_stop(ap);
5844 }
5845
5846 if (host->ops->host_stop)
5847 host->ops->host_stop(host);
5848 }
5849
5850 /**
5851 * ata_finalize_port_ops - finalize ata_port_operations
5852 * @ops: ata_port_operations to finalize
5853 *
5854 * An ata_port_operations can inherit from another ops and that
5855 * ops can again inherit from another. This can go on as many
5856 * times as necessary as long as there is no loop in the
5857 * inheritance chain.
5858 *
5859 * Ops tables are finalized when the host is started. NULL or
5860 * unspecified entries are inherited from the closest ancestor
5861 * which has the method and the entry is populated with it.
5862 * After finalization, the ops table directly points to all the
5863 * methods and ->inherits is no longer necessary and cleared.
5864 *
5865 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5866 *
5867 * LOCKING:
5868 * None.
5869 */
5870 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5871 {
5872 static DEFINE_SPINLOCK(lock);
5873 const struct ata_port_operations *cur;
5874 void **begin = (void **)ops;
5875 void **end = (void **)&ops->inherits;
5876 void **pp;
5877
5878 if (!ops || !ops->inherits)
5879 return;
5880
5881 spin_lock(&lock);
5882
5883 for (cur = ops->inherits; cur; cur = cur->inherits) {
5884 void **inherit = (void **)cur;
5885
5886 for (pp = begin; pp < end; pp++, inherit++)
5887 if (!*pp)
5888 *pp = *inherit;
5889 }
5890
5891 for (pp = begin; pp < end; pp++)
5892 if (IS_ERR(*pp))
5893 *pp = NULL;
5894
5895 ops->inherits = NULL;
5896
5897 spin_unlock(&lock);
5898 }
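/*
 * Illustrative sketch, not taken from an in-tree driver: how inheritance
 * and ATA_OP_NULL look from the LLD side ("example_ops" and
 * "example_softreset" are hypothetical):
 *
 *	static struct ata_port_operations example_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= ATA_OP_NULL,
 *		.softreset	= example_softreset,
 *	};
 *
 * After finalization, methods left unspecified (e.g. ->prereset) point at
 * the versions inherited via sata_port_ops/ata_base_port_ops, ->hardreset
 * has been forced to NULL, and ->inherits is cleared.
 */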
5899
5900 /**
5901 * ata_host_start - start and freeze ports of an ATA host
5902 * @host: ATA host to start ports for
5903 *
5904 * Start and then freeze ports of @host. Started status is
5905 * recorded in host->flags, so this function can be called
5906 * multiple times. Ports are guaranteed to get started only
5907 * once. If host->ops isn't initialized yet, it's set to the
5908 * first non-dummy port ops.
5909 *
5910 * LOCKING:
5911 * Inherited from calling layer (may sleep).
5912 *
5913 * RETURNS:
5914 * 0 if all ports are started successfully, -errno otherwise.
5915 */
5916 int ata_host_start(struct ata_host *host)
5917 {
5918 int have_stop = 0;
5919 void *start_dr = NULL;
5920 int i, rc;
5921
5922 if (host->flags & ATA_HOST_STARTED)
5923 return 0;
5924
5925 ata_finalize_port_ops(host->ops);
5926
5927 for (i = 0; i < host->n_ports; i++) {
5928 struct ata_port *ap = host->ports[i];
5929
5930 ata_finalize_port_ops(ap->ops);
5931
5932 if (!host->ops && !ata_port_is_dummy(ap))
5933 host->ops = ap->ops;
5934
5935 if (ap->ops->port_stop)
5936 have_stop = 1;
5937 }
5938
5939 if (host->ops->host_stop)
5940 have_stop = 1;
5941
5942 if (have_stop) {
5943 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5944 if (!start_dr)
5945 return -ENOMEM;
5946 }
5947
5948 for (i = 0; i < host->n_ports; i++) {
5949 struct ata_port *ap = host->ports[i];
5950
5951 if (ap->ops->port_start) {
5952 rc = ap->ops->port_start(ap);
5953 if (rc) {
5954 if (rc != -ENODEV)
5955 dev_printk(KERN_ERR, host->dev,
5956 "failed to start port %d "
5957 "(errno=%d)\n", i, rc);
5958 goto err_out;
5959 }
5960 }
5961 ata_eh_freeze_port(ap);
5962 }
5963
5964 if (start_dr)
5965 devres_add(host->dev, start_dr);
5966 host->flags |= ATA_HOST_STARTED;
5967 return 0;
5968
5969 err_out:
5970 while (--i >= 0) {
5971 struct ata_port *ap = host->ports[i];
5972
5973 if (ap->ops->port_stop)
5974 ap->ops->port_stop(ap);
5975 }
5976 devres_free(start_dr);
5977 return rc;
5978 }
5979
5980 /**
5981 * ata_host_init - Initialize a host struct
5982 * @host: host to initialize
5983 * @dev: device host is attached to
5984 * @flags: host flags
5985 * @ops: port_ops
5986 *
5987 * LOCKING:
5988 * PCI/etc. bus probe sem.
5989 *
5990 */
5991 /* KILLME - the only user left is ipr */
5992 void ata_host_init(struct ata_host *host, struct device *dev,
5993 unsigned long flags, struct ata_port_operations *ops)
5994 {
5995 spin_lock_init(&host->lock);
5996 host->dev = dev;
5997 host->flags = flags;
5998 host->ops = ops;
5999 }
6000
6002 static void async_port_probe(void *data, async_cookie_t cookie)
6003 {
6004 int rc;
6005 struct ata_port *ap = data;
6006
6007 /*
6008 * If we're not allowed to scan this host in parallel,
6009 * we need to wait until all previous scans have completed
6010 * before going further.
6011 * Jeff Garzik says this is only within a controller, so we
6012 * don't need to wait for port 0, only for later ports.
6013 */
6014 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6015 async_synchronize_cookie(cookie);
6016
6017 /* probe */
6018 if (ap->ops->error_handler) {
6019 struct ata_eh_info *ehi = &ap->link.eh_info;
6020 unsigned long flags;
6021
6022 /* kick EH for boot probing */
6023 spin_lock_irqsave(ap->lock, flags);
6024
6025 ehi->probe_mask |= ATA_ALL_DEVICES;
6026 ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
6027 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6028
6029 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6030 ap->pflags |= ATA_PFLAG_LOADING;
6031 ata_port_schedule_eh(ap);
6032
6033 spin_unlock_irqrestore(ap->lock, flags);
6034
6035 /* wait for EH to finish */
6036 ata_port_wait_eh(ap);
6037 } else {
6038 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6039 rc = ata_bus_probe(ap);
6040 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6041
6042 if (rc) {
6043 /* FIXME: do something useful here?
6044 * Current libata behavior will
6045 * tear down everything when
6046 * the module is removed
6047 * or the h/w is unplugged.
6048 */
6049 }
6050 }
6051
6052 /* in order to keep device order, we need to synchronize at this point */
6053 async_synchronize_cookie(cookie);
6054
6055 ata_scsi_scan_host(ap, 1);
6056 }
6057
6058 /**
6059 * ata_host_register - register initialized ATA host
6060 * @host: ATA host to register
6061 * @sht: template for SCSI host
6062 *
6063 * Register initialized ATA host. @host is allocated using
6064 * ata_host_alloc() and fully initialized by LLD. This function
6065 * starts ports, registers @host with ATA and SCSI layers and
6066 * probes registered devices.
6067 *
6068 * LOCKING:
6069 * Inherited from calling layer (may sleep).
6070 *
6071 * RETURNS:
6072 * 0 on success, -errno otherwise.
6073 */
6074 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6075 {
6076 int i, rc;
6077
6078 /* host must have been started */
6079 if (!(host->flags & ATA_HOST_STARTED)) {
6080 dev_printk(KERN_ERR, host->dev,
6081 "BUG: trying to register unstarted host\n");
6082 WARN_ON(1);
6083 return -EINVAL;
6084 }
6085
6086 /* Blow away unused ports. This happens when LLD can't
6087 * determine the exact number of ports to allocate at
6088 * allocation time.
6089 */
6090 for (i = host->n_ports; host->ports[i]; i++)
6091 kfree(host->ports[i]);
6092
6093 /* give ports names and add SCSI hosts */
6094 for (i = 0; i < host->n_ports; i++)
6095 host->ports[i]->print_id = ata_print_id++;
6096
6098 /* Create associated sysfs transport objects */
6099 for (i = 0; i < host->n_ports; i++) {
6100 rc = ata_tport_add(host->dev, host->ports[i]);
6101 if (rc) {
6102 goto err_tadd;
6103 }
6104 }
6105
6106 rc = ata_scsi_add_hosts(host, sht);
6107 if (rc)
6108 goto err_tadd;
6109
6110 /* associate with ACPI nodes */
6111 ata_acpi_associate(host);
6112
6113 /* set cable, sata_spd_limit and report */
6114 for (i = 0; i < host->n_ports; i++) {
6115 struct ata_port *ap = host->ports[i];
6116 unsigned long xfer_mask;
6117
6118 /* set SATA cable type if still unset */
6119 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6120 ap->cbl = ATA_CBL_SATA;
6121
6122 /* init sata_spd_limit to the current value */
6123 sata_link_init_spd(&ap->link);
6124 if (ap->slave_link)
6125 sata_link_init_spd(ap->slave_link);
6126
6127 /* print per-port info to dmesg */
6128 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6129 ap->udma_mask);
6130
6131 if (!ata_port_is_dummy(ap)) {
6132 ata_port_printk(ap, KERN_INFO,
6133 "%cATA max %s %s\n",
6134 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6135 ata_mode_string(xfer_mask),
6136 ap->link.eh_info.desc);
6137 ata_ehi_clear_desc(&ap->link.eh_info);
6138 } else
6139 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6140 }
6141
6142 /* perform each probe asynchronously */
6143 for (i = 0; i < host->n_ports; i++) {
6144 struct ata_port *ap = host->ports[i];
6145 async_schedule(async_port_probe, ap);
6146 }
6147
6148 return 0;
6149
6150 err_tadd:
6151 while (--i >= 0) {
6152 ata_tport_delete(host->ports[i]);
6153 }
6154 return rc;
6155 }
6157
6158 /**
6159 * ata_host_activate - start host, request IRQ and register it
6160 * @host: target ATA host
6161 * @irq: IRQ to request
6162 * @irq_handler: irq_handler used when requesting IRQ
6163 * @irq_flags: irq_flags used when requesting IRQ
6164 * @sht: scsi_host_template to use when registering the host
6165 *
6166 * After allocating an ATA host and initializing it, most libata
6167 * LLDs perform three steps to activate the host - start host,
6168 * request IRQ and register it. This helper takes necessary
6169 * arguments and performs the three steps in one go.
6170 *
6171 * An invalid IRQ skips the IRQ registration and expects the host to
6172 * have set polling mode on the port. In this case, @irq_handler
6173 * should be NULL.
6174 *
6175 * LOCKING:
6176 * Inherited from calling layer (may sleep).
6177 *
6178 * RETURNS:
6179 * 0 on success, -errno otherwise.
6180 */
6181 int ata_host_activate(struct ata_host *host, int irq,
6182 irq_handler_t irq_handler, unsigned long irq_flags,
6183 struct scsi_host_template *sht)
6184 {
6185 int i, rc;
6186
6187 rc = ata_host_start(host);
6188 if (rc)
6189 return rc;
6190
6191 /* Special case for polling mode */
6192 if (!irq) {
6193 WARN_ON(irq_handler);
6194 return ata_host_register(host, sht);
6195 }
6196
6197 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6198 dev_driver_string(host->dev), host);
6199 if (rc)
6200 return rc;
6201
6202 for (i = 0; i < host->n_ports; i++)
6203 ata_port_desc(host->ports[i], "irq %d", irq);
6204
6205 rc = ata_host_register(host, sht);
6206 /* if failed, just free the IRQ and leave ports alone */
6207 if (rc)
6208 devm_free_irq(host->dev, irq, host);
6209
6210 return rc;
6211 }
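/*
 * Illustrative sketch of the probe tail this helper covers; the names
 * example_interrupt, example_sht and ppi are hypothetical, and BAR/ioaddr
 * setup is elided:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... map resources and fill each ap->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, example_interrupt,
 *				 IRQF_SHARED, &example_sht);
 */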
6212
6213 /**
6214 * ata_port_detach - Detach ATA port in preparation of device removal
6215 * @ap: ATA port to be detached
6216 *
6217 * Detach all ATA devices and the associated SCSI devices of @ap;
6218 * then, remove the associated SCSI host. @ap is guaranteed to
6219 * be quiescent on return from this function.
6220 *
6221 * LOCKING:
6222 * Kernel thread context (may sleep).
6223 */
6224 static void ata_port_detach(struct ata_port *ap)
6225 {
6226 unsigned long flags;
6227
6228 if (!ap->ops->error_handler)
6229 goto skip_eh;
6230
6231 /* tell EH we're leaving & flush EH */
6232 spin_lock_irqsave(ap->lock, flags);
6233 ap->pflags |= ATA_PFLAG_UNLOADING;
6234 ata_port_schedule_eh(ap);
6235 spin_unlock_irqrestore(ap->lock, flags);
6236
6237 /* wait till EH commits suicide */
6238 ata_port_wait_eh(ap);
6239
6240 /* it better be dead now */
6241 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6242
6243 cancel_rearming_delayed_work(&ap->hotplug_task);
6244
6245 skip_eh:
6246 if (ap->pmp_link) {
6247 int i;
6248 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6249 ata_tlink_delete(&ap->pmp_link[i]);
6250 }
6251 ata_tport_delete(ap);
6252
6253 /* remove the associated SCSI host */
6254 scsi_remove_host(ap->scsi_host);
6255 }
6256
6257 /**
6258 * ata_host_detach - Detach all ports of an ATA host
6259 * @host: Host to detach
6260 *
6261 * Detach all ports of @host.
6262 *
6263 * LOCKING:
6264 * Kernel thread context (may sleep).
6265 */
6266 void ata_host_detach(struct ata_host *host)
6267 {
6268 int i;
6269
6270 for (i = 0; i < host->n_ports; i++)
6271 ata_port_detach(host->ports[i]);
6272
6273 /* the host is dead now, dissociate ACPI */
6274 ata_acpi_dissociate(host);
6275 }
6276
6277 #ifdef CONFIG_PCI
6278
6279 /**
6280 * ata_pci_remove_one - PCI layer callback for device removal
6281 * @pdev: PCI device that was removed
6282 *
6283 * PCI layer indicates to libata via this hook that hot-unplug or
6284 * module unload event has occurred. Detach all ports. Resource
6285 * release is handled via devres.
6286 *
6287 * LOCKING:
6288 * Inherited from PCI layer (may sleep).
6289 */
6290 void ata_pci_remove_one(struct pci_dev *pdev)
6291 {
6292 struct device *dev = &pdev->dev;
6293 struct ata_host *host = dev_get_drvdata(dev);
6294
6295 ata_host_detach(host);
6296 }
6297
6298 /* move to PCI subsystem */
6299 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6300 {
6301 unsigned long tmp = 0;
6302
6303 switch (bits->width) {
6304 case 1: {
6305 u8 tmp8 = 0;
6306 pci_read_config_byte(pdev, bits->reg, &tmp8);
6307 tmp = tmp8;
6308 break;
6309 }
6310 case 2: {
6311 u16 tmp16 = 0;
6312 pci_read_config_word(pdev, bits->reg, &tmp16);
6313 tmp = tmp16;
6314 break;
6315 }
6316 case 4: {
6317 u32 tmp32 = 0;
6318 pci_read_config_dword(pdev, bits->reg, &tmp32);
6319 tmp = tmp32;
6320 break;
6321 }
6322
6323 default:
6324 return -EINVAL;
6325 }
6326
6327 tmp &= bits->mask;
6328
6329 return (tmp == bits->val) ? 1 : 0;
6330 }
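/*
 * Illustrative sketch with hypothetical register offsets: an LLD checking
 * that an enable bit is already set in PCI config space before touching
 * the channel.  Fields are { reg, width (bytes), mask, val }.
 *
 *	static const struct pci_bits example_enable_bits = {
 *		0x41, 1, 0x80, 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &example_enable_bits))
 *		return -ENODEV;
 */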
6331
6332 #ifdef CONFIG_PM
6333 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6334 {
6335 pci_save_state(pdev);
6336 pci_disable_device(pdev);
6337
6338 if (mesg.event & PM_EVENT_SLEEP)
6339 pci_set_power_state(pdev, PCI_D3hot);
6340 }
6341
6342 int ata_pci_device_do_resume(struct pci_dev *pdev)
6343 {
6344 int rc;
6345
6346 pci_set_power_state(pdev, PCI_D0);
6347 pci_restore_state(pdev);
6348
6349 rc = pcim_enable_device(pdev);
6350 if (rc) {
6351 dev_printk(KERN_ERR, &pdev->dev,
6352 "failed to enable device after resume (%d)\n", rc);
6353 return rc;
6354 }
6355
6356 pci_set_master(pdev);
6357 return 0;
6358 }
6359
6360 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6361 {
6362 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6363 int rc = 0;
6364
6365 rc = ata_host_suspend(host, mesg);
6366 if (rc)
6367 return rc;
6368
6369 ata_pci_device_do_suspend(pdev, mesg);
6370
6371 return 0;
6372 }
6373
6374 int ata_pci_device_resume(struct pci_dev *pdev)
6375 {
6376 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6377 int rc;
6378
6379 rc = ata_pci_device_do_resume(pdev);
6380 if (rc == 0)
6381 ata_host_resume(host);
6382 return rc;
6383 }
6384 #endif /* CONFIG_PM */
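/*
 * Illustrative sketch: how a PCI LLD typically wires the helpers above into
 * its struct pci_driver so unplug and PM transitions are handled by libata
 * (driver name, id table and probe routine are hypothetical):
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		= "example_ata",
 *		.id_table	= example_pci_tbl,
 *		.probe		= example_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */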
6385
6386 #endif /* CONFIG_PCI */
6387
6388 static int __init ata_parse_force_one(char **cur,
6389 struct ata_force_ent *force_ent,
6390 const char **reason)
6391 {
6392 /* FIXME: Currently, there's no way to tag init const data and
6393 * using __initdata causes build failure on some versions of
6394 * gcc. Once __initdataconst is implemented, add const to the
6395 * following structure.
6396 */
6397 static struct ata_force_param force_tbl[] __initdata = {
6398 { "40c", .cbl = ATA_CBL_PATA40 },
6399 { "80c", .cbl = ATA_CBL_PATA80 },
6400 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6401 { "unk", .cbl = ATA_CBL_PATA_UNK },
6402 { "ign", .cbl = ATA_CBL_PATA_IGN },
6403 { "sata", .cbl = ATA_CBL_SATA },
6404 { "1.5Gbps", .spd_limit = 1 },
6405 { "3.0Gbps", .spd_limit = 2 },
6406 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6407 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6408 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6409 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6410 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6411 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6412 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6413 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6414 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6415 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6416 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6417 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6418 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6419 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6420 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6421 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6422 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6423 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6424 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6425 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6426 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6427 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6428 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6429 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6430 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6431 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6432 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6433 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6434 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6435 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6436 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6437 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6438 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6439 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6440 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6441 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6442 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6443 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6444 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6445 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6446 };
6447 char *start = *cur, *p = *cur;
6448 char *id, *val, *endp;
6449 const struct ata_force_param *match_fp = NULL;
6450 int nr_matches = 0, i;
6451
6452 /* find where this param ends and update *cur */
6453 while (*p != '\0' && *p != ',')
6454 p++;
6455
6456 if (*p == '\0')
6457 *cur = p;
6458 else
6459 *cur = p + 1;
6460
6461 *p = '\0';
6462
6463 /* parse */
6464 p = strchr(start, ':');
6465 if (!p) {
6466 val = strstrip(start);
6467 goto parse_val;
6468 }
6469 *p = '\0';
6470
6471 id = strstrip(start);
6472 val = strstrip(p + 1);
6473
6474 /* parse id */
6475 p = strchr(id, '.');
6476 if (p) {
6477 *p++ = '\0';
6478 force_ent->device = simple_strtoul(p, &endp, 10);
6479 if (p == endp || *endp != '\0') {
6480 *reason = "invalid device";
6481 return -EINVAL;
6482 }
6483 }
6484
6485 force_ent->port = simple_strtoul(id, &endp, 10);
6486 if (id == endp || *endp != '\0') {
6487 *reason = "invalid port/link";
6488 return -EINVAL;
6489 }
6490
6491 parse_val:
6492 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6493 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6494 const struct ata_force_param *fp = &force_tbl[i];
6495
6496 if (strncasecmp(val, fp->name, strlen(val)))
6497 continue;
6498
6499 nr_matches++;
6500 match_fp = fp;
6501
6502 if (strcasecmp(val, fp->name) == 0) {
6503 nr_matches = 1;
6504 break;
6505 }
6506 }
6507
6508 if (!nr_matches) {
6509 *reason = "unknown value";
6510 return -EINVAL;
6511 }
6512 if (nr_matches > 1) {
6513 *reason = "ambigious value";
6514 return -EINVAL;
6515 }
6516
6517 force_ent->param = *match_fp;
6518
6519 return 0;
6520 }
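/*
 * Illustrative examples only (port/device numbers are arbitrary): strings
 * accepted by this parser on the libata.force= parameter, following the
 * [PORT[.DEVICE]:]VALUE[,VALUE...] form handled above:
 *
 *	libata.force=3.0Gbps		limit all links to 3.0Gbps
 *	libata.force=2:noncq		turn off NCQ on port 2
 *	libata.force=1.00:udma33	cap device ata1.00 to UDMA/33
 */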
6521
6522 static void __init ata_parse_force_param(void)
6523 {
6524 int idx = 0, size = 1;
6525 int last_port = -1, last_device = -1;
6526 char *p, *cur, *next;
6527
6528 /* calculate maximum number of params and allocate force_tbl */
6529 for (p = ata_force_param_buf; *p; p++)
6530 if (*p == ',')
6531 size++;
6532
6533 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6534 if (!ata_force_tbl) {
6535 printk(KERN_WARNING "ata: failed to extend force table, "
6536 "libata.force ignored\n");
6537 return;
6538 }
6539
6540 /* parse and populate the table */
6541 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6542 const char *reason = "";
6543 struct ata_force_ent te = { .port = -1, .device = -1 };
6544
6545 next = cur;
6546 if (ata_parse_force_one(&next, &te, &reason)) {
6547 printk(KERN_WARNING "ata: failed to parse force "
6548 "parameter \"%s\" (%s)\n",
6549 cur, reason);
6550 continue;
6551 }
6552
6553 if (te.port == -1) {
6554 te.port = last_port;
6555 te.device = last_device;
6556 }
6557
6558 ata_force_tbl[idx++] = te;
6559
6560 last_port = te.port;
6561 last_device = te.device;
6562 }
6563
6564 ata_force_tbl_size = idx;
6565 }
6566
6567 static int __init ata_init(void)
6568 {
6569 int rc;
6570
6571 ata_parse_force_param();
6572
6573 rc = ata_sff_init();
6574 if (rc) {
6575 kfree(ata_force_tbl);
6576 return rc;
6577 }
6578
6579 libata_transport_init();
6580 ata_scsi_transport_template = ata_attach_transport();
6581 if (!ata_scsi_transport_template) {
6582 ata_sff_exit();
6583 rc = -ENOMEM;
6584 goto err_out;
6585 }
6586
6587 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6588 return 0;
6589
6590 err_out:
6591 return rc;
6592 }
6593
6594 static void __exit ata_exit(void)
6595 {
6596 ata_release_transport(ata_scsi_transport_template);
6597 libata_transport_exit();
6598 ata_sff_exit();
6599 kfree(ata_force_tbl);
6600 }
6601
6602 subsys_initcall(ata_init);
6603 module_exit(ata_exit);
6604
6605 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6606
6607 int ata_ratelimit(void)
6608 {
6609 return __ratelimit(&ratelimit);
6610 }
6611
6612 /**
6613 * ata_wait_register - wait until register value changes
6614 * @reg: IO-mapped register
6615 * @mask: Mask to apply to read register value
6616 * @val: Wait condition
6617 * @interval: polling interval in milliseconds
6618 * @timeout: timeout in milliseconds
6619 *
6620 * Waiting for some bits of register to change is a common
6621 * operation for ATA controllers. This function reads 32bit LE
6622 * IO-mapped register @reg and tests for the following condition.
6623 *
6624 * (*@reg & mask) != val
6625 *
6626 * If the condition is met, it returns; otherwise, the process is
6627 * repeated after @interval until timeout.
6628 *
6629 * LOCKING:
6630 * Kernel thread context (may sleep)
6631 *
6632 * RETURNS:
6633 * The final register value.
6634 */
6635 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6636 unsigned long interval, unsigned long timeout)
6637 {
6638 unsigned long deadline;
6639 u32 tmp;
6640
6641 tmp = ioread32(reg);
6642
6643 /* Calculate timeout _after_ the first read to make sure
6644 * preceding writes reach the controller before starting to
6645 * eat away the timeout.
6646 */
6647 deadline = ata_deadline(jiffies, timeout);
6648
6649 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6650 msleep(interval);
6651 tmp = ioread32(reg);
6652 }
6653
6654 return tmp;
6655 }
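/*
 * Illustrative sketch with hypothetical register/bit names: waiting up to
 * one second, polling every 10ms, for a controller busy bit to clear.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + EXAMPLE_STATUS,
 *				   EXAMPLE_BUSY, EXAMPLE_BUSY, 10, 1000);
 *	if (status & EXAMPLE_BUSY)
 *		return -EBUSY;
 *
 * The wait loops while (reg & mask) == val, so passing the busy bit as both
 * @mask and @val returns as soon as the bit drops (or the timeout expires).
 */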
6656
6657 /*
6658 * Dummy port_ops
6659 */
6660 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6661 {
6662 return AC_ERR_SYSTEM;
6663 }
6664
6665 static void ata_dummy_error_handler(struct ata_port *ap)
6666 {
6667 /* truly dummy */
6668 }
6669
6670 struct ata_port_operations ata_dummy_port_ops = {
6671 .qc_prep = ata_noop_qc_prep,
6672 .qc_issue = ata_dummy_qc_issue,
6673 .error_handler = ata_dummy_error_handler,
6674 };
6675
6676 const struct ata_port_info ata_dummy_port_info = {
6677 .port_ops = &ata_dummy_port_ops,
6678 };
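/*
 * Illustrative sketch: an LLD whose controller has a disabled or absent
 * second channel can pass the dummy port info for it when allocating the
 * host ("example_port_info" is hypothetical):
 *
 *	const struct ata_port_info *ppi[] = {
 *		&example_port_info,
 *		&ata_dummy_port_info,
 *	};
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */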
6679
6680 /*
6681 * libata is essentially a library of internal helper functions for
6682 * low-level ATA host controller drivers. As such, the API/ABI is
6683 * likely to change as new drivers are added and updated.
6684 * Do not depend on ABI/API stability.
6685 */
6686 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6687 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6688 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6689 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6690 EXPORT_SYMBOL_GPL(sata_port_ops);
6691 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6692 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6693 EXPORT_SYMBOL_GPL(ata_link_next);
6694 EXPORT_SYMBOL_GPL(ata_dev_next);
6695 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6696 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6697 EXPORT_SYMBOL_GPL(ata_host_init);
6698 EXPORT_SYMBOL_GPL(ata_host_alloc);
6699 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6700 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6701 EXPORT_SYMBOL_GPL(ata_host_start);
6702 EXPORT_SYMBOL_GPL(ata_host_register);
6703 EXPORT_SYMBOL_GPL(ata_host_activate);
6704 EXPORT_SYMBOL_GPL(ata_host_detach);
6705 EXPORT_SYMBOL_GPL(ata_sg_init);
6706 EXPORT_SYMBOL_GPL(ata_qc_complete);
6707 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6708 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6709 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6710 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6711 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6712 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6713 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6714 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6715 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6716 EXPORT_SYMBOL_GPL(ata_mode_string);
6717 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6718 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6719 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6720 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6721 EXPORT_SYMBOL_GPL(ata_dev_disable);
6722 EXPORT_SYMBOL_GPL(sata_set_spd);
6723 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6724 EXPORT_SYMBOL_GPL(sata_link_debounce);
6725 EXPORT_SYMBOL_GPL(sata_link_resume);
6726 EXPORT_SYMBOL_GPL(ata_std_prereset);
6727 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6728 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6729 EXPORT_SYMBOL_GPL(ata_std_postreset);
6730 EXPORT_SYMBOL_GPL(ata_dev_classify);
6731 EXPORT_SYMBOL_GPL(ata_dev_pair);
6732 EXPORT_SYMBOL_GPL(ata_ratelimit);
6733 EXPORT_SYMBOL_GPL(ata_wait_register);
6734 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6735 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6736 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6737 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6738 EXPORT_SYMBOL_GPL(sata_scr_valid);
6739 EXPORT_SYMBOL_GPL(sata_scr_read);
6740 EXPORT_SYMBOL_GPL(sata_scr_write);
6741 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6742 EXPORT_SYMBOL_GPL(ata_link_online);
6743 EXPORT_SYMBOL_GPL(ata_link_offline);
6744 #ifdef CONFIG_PM
6745 EXPORT_SYMBOL_GPL(ata_host_suspend);
6746 EXPORT_SYMBOL_GPL(ata_host_resume);
6747 #endif /* CONFIG_PM */
6748 EXPORT_SYMBOL_GPL(ata_id_string);
6749 EXPORT_SYMBOL_GPL(ata_id_c_string);
6750 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6751 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6752
6753 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6754 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6755 EXPORT_SYMBOL_GPL(ata_timing_compute);
6756 EXPORT_SYMBOL_GPL(ata_timing_merge);
6757 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6758
6759 #ifdef CONFIG_PCI
6760 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6761 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6762 #ifdef CONFIG_PM
6763 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6764 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6765 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6766 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6767 #endif /* CONFIG_PM */
6768 #endif /* CONFIG_PCI */
6769
6770 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6771 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6772 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6773 EXPORT_SYMBOL_GPL(ata_port_desc);
6774 #ifdef CONFIG_PCI
6775 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6776 #endif /* CONFIG_PCI */
6777 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6778 EXPORT_SYMBOL_GPL(ata_link_abort);
6779 EXPORT_SYMBOL_GPL(ata_port_abort);
6780 EXPORT_SYMBOL_GPL(ata_port_freeze);
6781 EXPORT_SYMBOL_GPL(sata_async_notification);
6782 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6783 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6784 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6785 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6786 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6787 EXPORT_SYMBOL_GPL(ata_do_eh);
6788 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6789
6790 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6791 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6792 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6793 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6794 EXPORT_SYMBOL_GPL(ata_cable_sata);