libata: don't use ap->ioaddr in non-SFF drivers
[deliverable/linux.git] / drivers / ata / libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/jiffies.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <scsi/scsi.h>
61 #include <scsi/scsi_cmnd.h>
62 #include <scsi/scsi_host.h>
63 #include <linux/libata.h>
64 #include <asm/semaphore.h>
65 #include <asm/byteorder.h>
66 #include <linux/cdrom.h>
67
68 #include "libata.h"
69
70
71 /* debounce timing parameters in msecs { interval, duration, timeout } */
72 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
73 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
74 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
75
76 const struct ata_port_operations ata_base_port_ops = {
77 .prereset = ata_std_prereset,
78 .postreset = ata_std_postreset,
79 .error_handler = ata_std_error_handler,
80 };
81
82 const struct ata_port_operations sata_port_ops = {
83 .inherits = &ata_base_port_ops,
84
85 .qc_defer = ata_std_qc_defer,
86 .hardreset = sata_std_hardreset,
87 };
88
89 const struct ata_port_operations sata_pmp_port_ops = {
90 .inherits = &sata_port_ops,
91
92 .pmp_prereset = ata_std_prereset,
93 .pmp_hardreset = sata_std_hardreset,
94 .pmp_postreset = ata_std_postreset,
95 .error_handler = sata_pmp_error_handler,
96 };
97
98 static unsigned int ata_dev_init_params(struct ata_device *dev,
99 u16 heads, u16 sectors);
100 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
101 static unsigned int ata_dev_set_feature(struct ata_device *dev,
102 u8 enable, u8 feature);
103 static void ata_dev_xfermask(struct ata_device *dev);
104 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
105
106 unsigned int ata_print_id = 1;
107 static struct workqueue_struct *ata_wq;
108
109 struct workqueue_struct *ata_aux_wq;
110
111 struct ata_force_param {
112 const char *name;
113 unsigned int cbl;
114 int spd_limit;
115 unsigned long xfer_mask;
116 unsigned int horkage_on;
117 unsigned int horkage_off;
118 };
119
120 struct ata_force_ent {
121 int port;
122 int device;
123 struct ata_force_param param;
124 };
125
126 static struct ata_force_ent *ata_force_tbl;
127 static int ata_force_tbl_size;
128
129 static char ata_force_param_buf[PAGE_SIZE] __initdata;
130 /* param_buf is thrown away after initialization, disallow read */
131 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
132 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
133
134 int atapi_enabled = 1;
135 module_param(atapi_enabled, int, 0444);
136 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
137
138 static int atapi_dmadir = 0;
139 module_param(atapi_dmadir, int, 0444);
140 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
141
142 int atapi_passthru16 = 1;
143 module_param(atapi_passthru16, int, 0444);
144 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
145
146 int libata_fua = 0;
147 module_param_named(fua, libata_fua, int, 0444);
148 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
149
150 static int ata_ignore_hpa;
151 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
152 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
153
154 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
155 module_param_named(dma, libata_dma_mask, int, 0444);
156 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
157
158 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
159 module_param(ata_probe_timeout, int, 0444);
160 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
161
162 int libata_noacpi = 0;
163 module_param_named(noacpi, libata_noacpi, int, 0444);
164 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
165
166 int libata_allow_tpm = 0;
167 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
168 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
169
170 MODULE_AUTHOR("Jeff Garzik");
171 MODULE_DESCRIPTION("Library module for ATA devices");
172 MODULE_LICENSE("GPL");
173 MODULE_VERSION(DRV_VERSION);
174
175
176 /**
177 * ata_force_cbl - force cable type according to libata.force
178 * @ap: ATA port of interest
179 *
180 * Force cable type according to libata.force and whine about it.
181 * The last entry which has matching port number is used, so it
182 * can be specified as part of device force parameters. For
183 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
184 * same effect.
185 *
186 * LOCKING:
187 * EH context.
188 */
189 void ata_force_cbl(struct ata_port *ap)
190 {
191 int i;
192
193 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
194 const struct ata_force_ent *fe = &ata_force_tbl[i];
195
196 if (fe->port != -1 && fe->port != ap->print_id)
197 continue;
198
199 if (fe->param.cbl == ATA_CBL_NONE)
200 continue;
201
202 ap->cbl = fe->param.cbl;
203 ata_port_printk(ap, KERN_NOTICE,
204 "FORCE: cable set to %s\n", fe->param.name);
205 return;
206 }
207 }
208
209 /**
210 * ata_force_spd_limit - force SATA spd limit according to libata.force
211 * @link: ATA link of interest
212 *
213 * Force SATA spd limit according to libata.force and whine about
214 * it. When only the port part is specified (e.g. 1:), the limit
215 * applies to all links connected to both the host link and all
216 * fan-out ports connected via PMP. If the device part is
217 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
218 * link not the host link. Device number 15 always points to the
219 * host link whether PMP is attached or not.
220 *
221 * LOCKING:
222 * EH context.
223 */
224 static void ata_force_spd_limit(struct ata_link *link)
225 {
226 int linkno, i;
227
228 if (ata_is_host_link(link))
229 linkno = 15;
230 else
231 linkno = link->pmp;
232
233 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
234 const struct ata_force_ent *fe = &ata_force_tbl[i];
235
236 if (fe->port != -1 && fe->port != link->ap->print_id)
237 continue;
238
239 if (fe->device != -1 && fe->device != linkno)
240 continue;
241
242 if (!fe->param.spd_limit)
243 continue;
244
245 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
246 ata_link_printk(link, KERN_NOTICE,
247 "FORCE: PHY spd limit set to %s\n", fe->param.name);
248 return;
249 }
250 }
251
252 /**
253 * ata_force_xfermask - force xfermask according to libata.force
254 * @dev: ATA device of interest
255 *
256 * Force xfer_mask according to libata.force and whine about it.
257 * For consistency with link selection, device number 15 selects
258 * the first device connected to the host link.
259 *
260 * LOCKING:
261 * EH context.
262 */
263 static void ata_force_xfermask(struct ata_device *dev)
264 {
265 int devno = dev->link->pmp + dev->devno;
266 int alt_devno = devno;
267 int i;
268
269 /* allow n.15 for the first device attached to host port */
270 if (ata_is_host_link(dev->link) && devno == 0)
271 alt_devno = 15;
272
273 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
274 const struct ata_force_ent *fe = &ata_force_tbl[i];
275 unsigned long pio_mask, mwdma_mask, udma_mask;
276
277 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
278 continue;
279
280 if (fe->device != -1 && fe->device != devno &&
281 fe->device != alt_devno)
282 continue;
283
284 if (!fe->param.xfer_mask)
285 continue;
286
287 ata_unpack_xfermask(fe->param.xfer_mask,
288 &pio_mask, &mwdma_mask, &udma_mask);
289 if (udma_mask)
290 dev->udma_mask = udma_mask;
291 else if (mwdma_mask) {
292 dev->udma_mask = 0;
293 dev->mwdma_mask = mwdma_mask;
294 } else {
295 dev->udma_mask = 0;
296 dev->mwdma_mask = 0;
297 dev->pio_mask = pio_mask;
298 }
299
300 ata_dev_printk(dev, KERN_NOTICE,
301 "FORCE: xfer_mask set to %s\n", fe->param.name);
302 return;
303 }
304 }
305
306 /**
307 * ata_force_horkage - force horkage according to libata.force
308 * @dev: ATA device of interest
309 *
310 * Force horkage according to libata.force and whine about it.
311 * For consistency with link selection, device number 15 selects
312 * the first device connected to the host link.
313 *
314 * LOCKING:
315 * EH context.
316 */
317 static void ata_force_horkage(struct ata_device *dev)
318 {
319 int devno = dev->link->pmp + dev->devno;
320 int alt_devno = devno;
321 int i;
322
323 /* allow n.15 for the first device attached to host port */
324 if (ata_is_host_link(dev->link) && devno == 0)
325 alt_devno = 15;
326
327 for (i = 0; i < ata_force_tbl_size; i++) {
328 const struct ata_force_ent *fe = &ata_force_tbl[i];
329
330 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
331 continue;
332
333 if (fe->device != -1 && fe->device != devno &&
334 fe->device != alt_devno)
335 continue;
336
337 if (!(~dev->horkage & fe->param.horkage_on) &&
338 !(dev->horkage & fe->param.horkage_off))
339 continue;
340
341 dev->horkage |= fe->param.horkage_on;
342 dev->horkage &= ~fe->param.horkage_off;
343
344 ata_dev_printk(dev, KERN_NOTICE,
345 "FORCE: horkage modified (%s)\n", fe->param.name);
346 }
347 }
348
349 /**
350 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
351 * @opcode: SCSI opcode
352 *
353 * Determine ATAPI command type from @opcode.
354 *
355 * LOCKING:
356 * None.
357 *
358 * RETURNS:
359 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
360 */
361 int atapi_cmd_type(u8 opcode)
362 {
363 switch (opcode) {
364 case GPCMD_READ_10:
365 case GPCMD_READ_12:
366 return ATAPI_READ;
367
368 case GPCMD_WRITE_10:
369 case GPCMD_WRITE_12:
370 case GPCMD_WRITE_AND_VERIFY_10:
371 return ATAPI_WRITE;
372
373 case GPCMD_READ_CD:
374 case GPCMD_READ_CD_MSF:
375 return ATAPI_READ_CD;
376
377 case ATA_16:
378 case ATA_12:
379 if (atapi_passthru16)
380 return ATAPI_PASS_THRU;
381 /* fall thru */
382 default:
383 return ATAPI_MISC;
384 }
385 }
386
387 /**
388 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
389 * @tf: Taskfile to convert
390 * @pmp: Port multiplier port
391 * @is_cmd: This FIS is for command
392 * @fis: Buffer into which data will output
393 *
394 * Converts a standard ATA taskfile to a Serial ATA
395 * FIS structure (Register - Host to Device).
396 *
397 * LOCKING:
398 * Inherited from caller.
399 */
400 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
401 {
402 fis[0] = 0x27; /* Register - Host to Device FIS */
403 fis[1] = pmp & 0xf; /* Port multiplier number*/
404 if (is_cmd)
405 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
406
407 fis[2] = tf->command;
408 fis[3] = tf->feature;
409
410 fis[4] = tf->lbal;
411 fis[5] = tf->lbam;
412 fis[6] = tf->lbah;
413 fis[7] = tf->device;
414
415 fis[8] = tf->hob_lbal;
416 fis[9] = tf->hob_lbam;
417 fis[10] = tf->hob_lbah;
418 fis[11] = tf->hob_feature;
419
420 fis[12] = tf->nsect;
421 fis[13] = tf->hob_nsect;
422 fis[14] = 0;
423 fis[15] = tf->ctl;
424
425 fis[16] = 0;
426 fis[17] = 0;
427 fis[18] = 0;
428 fis[19] = 0;
429 }
430
431 /**
432 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
433 * @fis: Buffer from which data will be input
434 * @tf: Taskfile to output
435 *
436 * Converts a serial ATA FIS structure to a standard ATA taskfile.
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441
442 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
443 {
444 tf->command = fis[2]; /* status */
445 tf->feature = fis[3]; /* error */
446
447 tf->lbal = fis[4];
448 tf->lbam = fis[5];
449 tf->lbah = fis[6];
450 tf->device = fis[7];
451
452 tf->hob_lbal = fis[8];
453 tf->hob_lbam = fis[9];
454 tf->hob_lbah = fis[10];
455
456 tf->nsect = fis[12];
457 tf->hob_nsect = fis[13];
458 }
459
460 static const u8 ata_rw_cmds[] = {
461 /* pio multi */
462 ATA_CMD_READ_MULTI,
463 ATA_CMD_WRITE_MULTI,
464 ATA_CMD_READ_MULTI_EXT,
465 ATA_CMD_WRITE_MULTI_EXT,
466 0,
467 0,
468 0,
469 ATA_CMD_WRITE_MULTI_FUA_EXT,
470 /* pio */
471 ATA_CMD_PIO_READ,
472 ATA_CMD_PIO_WRITE,
473 ATA_CMD_PIO_READ_EXT,
474 ATA_CMD_PIO_WRITE_EXT,
475 0,
476 0,
477 0,
478 0,
479 /* dma */
480 ATA_CMD_READ,
481 ATA_CMD_WRITE,
482 ATA_CMD_READ_EXT,
483 ATA_CMD_WRITE_EXT,
484 0,
485 0,
486 0,
487 ATA_CMD_WRITE_FUA_EXT
488 };
489
490 /**
491 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
492 * @tf: command to examine and configure
493 * @dev: device tf belongs to
494 *
495 * Examine the device configuration and tf->flags to calculate
496 * the proper read/write commands and protocol to use.
497 *
498 * LOCKING:
499 * caller.
500 */
501 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
502 {
503 u8 cmd;
504
505 int index, fua, lba48, write;
506
507 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
508 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
509 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
510
511 if (dev->flags & ATA_DFLAG_PIO) {
512 tf->protocol = ATA_PROT_PIO;
513 index = dev->multi_count ? 0 : 8;
514 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
515 /* Unable to use DMA due to host limitation */
516 tf->protocol = ATA_PROT_PIO;
517 index = dev->multi_count ? 0 : 8;
518 } else {
519 tf->protocol = ATA_PROT_DMA;
520 index = 16;
521 }
522
523 cmd = ata_rw_cmds[index + fua + lba48 + write];
524 if (cmd) {
525 tf->command = cmd;
526 return 0;
527 }
528 return -1;
529 }
530
531 /**
532 * ata_tf_read_block - Read block address from ATA taskfile
533 * @tf: ATA taskfile of interest
534 * @dev: ATA device @tf belongs to
535 *
536 * LOCKING:
537 * None.
538 *
539 * Read block address from @tf. This function can handle all
540 * three address formats - LBA, LBA48 and CHS. tf->protocol and
541 * flags select the address format to use.
542 *
543 * RETURNS:
544 * Block address read from @tf.
545 */
546 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
547 {
548 u64 block = 0;
549
550 if (tf->flags & ATA_TFLAG_LBA) {
551 if (tf->flags & ATA_TFLAG_LBA48) {
552 block |= (u64)tf->hob_lbah << 40;
553 block |= (u64)tf->hob_lbam << 32;
554 block |= tf->hob_lbal << 24;
555 } else
556 block |= (tf->device & 0xf) << 24;
557
558 block |= tf->lbah << 16;
559 block |= tf->lbam << 8;
560 block |= tf->lbal;
561 } else {
562 u32 cyl, head, sect;
563
564 cyl = tf->lbam | (tf->lbah << 8);
565 head = tf->device & 0xf;
566 sect = tf->lbal;
567
568 block = (cyl * dev->heads + head) * dev->sectors + sect;
569 }
570
571 return block;
572 }
573
574 /**
575 * ata_build_rw_tf - Build ATA taskfile for given read/write request
576 * @tf: Target ATA taskfile
577 * @dev: ATA device @tf belongs to
578 * @block: Block address
579 * @n_block: Number of blocks
580 * @tf_flags: RW/FUA etc...
581 * @tag: tag
582 *
583 * LOCKING:
584 * None.
585 *
586 * Build ATA taskfile @tf for read/write request described by
587 * @block, @n_block, @tf_flags and @tag on @dev.
588 *
589 * RETURNS:
590 *
591 * 0 on success, -ERANGE if the request is too large for @dev,
592 * -EINVAL if the request is invalid.
593 */
594 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
595 u64 block, u32 n_block, unsigned int tf_flags,
596 unsigned int tag)
597 {
598 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
599 tf->flags |= tf_flags;
600
601 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
602 /* yay, NCQ */
603 if (!lba_48_ok(block, n_block))
604 return -ERANGE;
605
606 tf->protocol = ATA_PROT_NCQ;
607 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
608
609 if (tf->flags & ATA_TFLAG_WRITE)
610 tf->command = ATA_CMD_FPDMA_WRITE;
611 else
612 tf->command = ATA_CMD_FPDMA_READ;
613
614 tf->nsect = tag << 3;
615 tf->hob_feature = (n_block >> 8) & 0xff;
616 tf->feature = n_block & 0xff;
617
618 tf->hob_lbah = (block >> 40) & 0xff;
619 tf->hob_lbam = (block >> 32) & 0xff;
620 tf->hob_lbal = (block >> 24) & 0xff;
621 tf->lbah = (block >> 16) & 0xff;
622 tf->lbam = (block >> 8) & 0xff;
623 tf->lbal = block & 0xff;
624
625 tf->device = 1 << 6;
626 if (tf->flags & ATA_TFLAG_FUA)
627 tf->device |= 1 << 7;
628 } else if (dev->flags & ATA_DFLAG_LBA) {
629 tf->flags |= ATA_TFLAG_LBA;
630
631 if (lba_28_ok(block, n_block)) {
632 /* use LBA28 */
633 tf->device |= (block >> 24) & 0xf;
634 } else if (lba_48_ok(block, n_block)) {
635 if (!(dev->flags & ATA_DFLAG_LBA48))
636 return -ERANGE;
637
638 /* use LBA48 */
639 tf->flags |= ATA_TFLAG_LBA48;
640
641 tf->hob_nsect = (n_block >> 8) & 0xff;
642
643 tf->hob_lbah = (block >> 40) & 0xff;
644 tf->hob_lbam = (block >> 32) & 0xff;
645 tf->hob_lbal = (block >> 24) & 0xff;
646 } else
647 /* request too large even for LBA48 */
648 return -ERANGE;
649
650 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
651 return -EINVAL;
652
653 tf->nsect = n_block & 0xff;
654
655 tf->lbah = (block >> 16) & 0xff;
656 tf->lbam = (block >> 8) & 0xff;
657 tf->lbal = block & 0xff;
658
659 tf->device |= ATA_LBA;
660 } else {
661 /* CHS */
662 u32 sect, head, cyl, track;
663
664 /* The request -may- be too large for CHS addressing. */
665 if (!lba_28_ok(block, n_block))
666 return -ERANGE;
667
668 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
669 return -EINVAL;
670
671 /* Convert LBA to CHS */
672 track = (u32)block / dev->sectors;
673 cyl = track / dev->heads;
674 head = track % dev->heads;
675 sect = (u32)block % dev->sectors + 1;
676
677 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
678 (u32)block, track, cyl, head, sect);
679
680 /* Check whether the converted CHS can fit.
681 Cylinder: 0-65535
682 Head: 0-15
683 Sector: 1-255*/
684 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
685 return -ERANGE;
686
687 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
688 tf->lbal = sect;
689 tf->lbam = cyl;
690 tf->lbah = cyl >> 8;
691 tf->device |= head;
692 }
693
694 return 0;
695 }
696
697 /**
698 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
699 * @pio_mask: pio_mask
700 * @mwdma_mask: mwdma_mask
701 * @udma_mask: udma_mask
702 *
703 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
704 * unsigned int xfer_mask.
705 *
706 * LOCKING:
707 * None.
708 *
709 * RETURNS:
710 * Packed xfer_mask.
711 */
712 unsigned long ata_pack_xfermask(unsigned long pio_mask,
713 unsigned long mwdma_mask,
714 unsigned long udma_mask)
715 {
716 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
717 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
718 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
719 }
720
721 /**
722 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
723 * @xfer_mask: xfer_mask to unpack
724 * @pio_mask: resulting pio_mask
725 * @mwdma_mask: resulting mwdma_mask
726 * @udma_mask: resulting udma_mask
727 *
728 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
729 * Any NULL distination masks will be ignored.
730 */
731 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
732 unsigned long *mwdma_mask, unsigned long *udma_mask)
733 {
734 if (pio_mask)
735 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
736 if (mwdma_mask)
737 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
738 if (udma_mask)
739 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
740 }
741
742 static const struct ata_xfer_ent {
743 int shift, bits;
744 u8 base;
745 } ata_xfer_tbl[] = {
746 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
747 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
748 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
749 { -1, },
750 };
751
752 /**
753 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
754 * @xfer_mask: xfer_mask of interest
755 *
756 * Return matching XFER_* value for @xfer_mask. Only the highest
757 * bit of @xfer_mask is considered.
758 *
759 * LOCKING:
760 * None.
761 *
762 * RETURNS:
763 * Matching XFER_* value, 0xff if no match found.
764 */
765 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
766 {
767 int highbit = fls(xfer_mask) - 1;
768 const struct ata_xfer_ent *ent;
769
770 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
771 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
772 return ent->base + highbit - ent->shift;
773 return 0xff;
774 }
775
776 /**
777 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
778 * @xfer_mode: XFER_* of interest
779 *
780 * Return matching xfer_mask for @xfer_mode.
781 *
782 * LOCKING:
783 * None.
784 *
785 * RETURNS:
786 * Matching xfer_mask, 0 if no match found.
787 */
788 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
789 {
790 const struct ata_xfer_ent *ent;
791
792 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
793 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
794 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
795 & ~((1 << ent->shift) - 1);
796 return 0;
797 }
798
799 /**
800 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
801 * @xfer_mode: XFER_* of interest
802 *
803 * Return matching xfer_shift for @xfer_mode.
804 *
805 * LOCKING:
806 * None.
807 *
808 * RETURNS:
809 * Matching xfer_shift, -1 if no match found.
810 */
811 int ata_xfer_mode2shift(unsigned long xfer_mode)
812 {
813 const struct ata_xfer_ent *ent;
814
815 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
816 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
817 return ent->shift;
818 return -1;
819 }
820
821 /**
822 * ata_mode_string - convert xfer_mask to string
823 * @xfer_mask: mask of bits supported; only highest bit counts.
824 *
825 * Determine string which represents the highest speed
826 * (highest bit in @modemask).
827 *
828 * LOCKING:
829 * None.
830 *
831 * RETURNS:
832 * Constant C string representing highest speed listed in
833 * @mode_mask, or the constant C string "<n/a>".
834 */
835 const char *ata_mode_string(unsigned long xfer_mask)
836 {
837 static const char * const xfer_mode_str[] = {
838 "PIO0",
839 "PIO1",
840 "PIO2",
841 "PIO3",
842 "PIO4",
843 "PIO5",
844 "PIO6",
845 "MWDMA0",
846 "MWDMA1",
847 "MWDMA2",
848 "MWDMA3",
849 "MWDMA4",
850 "UDMA/16",
851 "UDMA/25",
852 "UDMA/33",
853 "UDMA/44",
854 "UDMA/66",
855 "UDMA/100",
856 "UDMA/133",
857 "UDMA7",
858 };
859 int highbit;
860
861 highbit = fls(xfer_mask) - 1;
862 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
863 return xfer_mode_str[highbit];
864 return "<n/a>";
865 }
866
867 static const char *sata_spd_string(unsigned int spd)
868 {
869 static const char * const spd_str[] = {
870 "1.5 Gbps",
871 "3.0 Gbps",
872 };
873
874 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
875 return "<unknown>";
876 return spd_str[spd - 1];
877 }
878
879 void ata_dev_disable(struct ata_device *dev)
880 {
881 if (ata_dev_enabled(dev)) {
882 if (ata_msg_drv(dev->link->ap))
883 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
884 ata_acpi_on_disable(dev);
885 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
886 ATA_DNXFER_QUIET);
887 dev->class++;
888 }
889 }
890
891 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
892 {
893 struct ata_link *link = dev->link;
894 struct ata_port *ap = link->ap;
895 u32 scontrol;
896 unsigned int err_mask;
897 int rc;
898
899 /*
900 * disallow DIPM for drivers which haven't set
901 * ATA_FLAG_IPM. This is because when DIPM is enabled,
902 * phy ready will be set in the interrupt status on
903 * state changes, which will cause some drivers to
904 * think there are errors - additionally drivers will
905 * need to disable hot plug.
906 */
907 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
908 ap->pm_policy = NOT_AVAILABLE;
909 return -EINVAL;
910 }
911
912 /*
913 * For DIPM, we will only enable it for the
914 * min_power setting.
915 *
916 * Why? Because Disks are too stupid to know that
917 * If the host rejects a request to go to SLUMBER
918 * they should retry at PARTIAL, and instead it
919 * just would give up. So, for medium_power to
920 * work at all, we need to only allow HIPM.
921 */
922 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
923 if (rc)
924 return rc;
925
926 switch (policy) {
927 case MIN_POWER:
928 /* no restrictions on IPM transitions */
929 scontrol &= ~(0x3 << 8);
930 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
931 if (rc)
932 return rc;
933
934 /* enable DIPM */
935 if (dev->flags & ATA_DFLAG_DIPM)
936 err_mask = ata_dev_set_feature(dev,
937 SETFEATURES_SATA_ENABLE, SATA_DIPM);
938 break;
939 case MEDIUM_POWER:
940 /* allow IPM to PARTIAL */
941 scontrol &= ~(0x1 << 8);
942 scontrol |= (0x2 << 8);
943 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
944 if (rc)
945 return rc;
946
947 /*
948 * we don't have to disable DIPM since IPM flags
949 * disallow transitions to SLUMBER, which effectively
950 * disable DIPM if it does not support PARTIAL
951 */
952 break;
953 case NOT_AVAILABLE:
954 case MAX_PERFORMANCE:
955 /* disable all IPM transitions */
956 scontrol |= (0x3 << 8);
957 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
958 if (rc)
959 return rc;
960
961 /*
962 * we don't have to disable DIPM since IPM flags
963 * disallow all transitions which effectively
964 * disable DIPM anyway.
965 */
966 break;
967 }
968
969 /* FIXME: handle SET FEATURES failure */
970 (void) err_mask;
971
972 return 0;
973 }
974
975 /**
976 * ata_dev_enable_pm - enable SATA interface power management
977 * @dev: device to enable power management
978 * @policy: the link power management policy
979 *
980 * Enable SATA Interface power management. This will enable
981 * Device Interface Power Management (DIPM) for min_power
982 * policy, and then call driver specific callbacks for
983 * enabling Host Initiated Power management.
984 *
985 * Locking: Caller.
986 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
987 */
988 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
989 {
990 int rc = 0;
991 struct ata_port *ap = dev->link->ap;
992
993 /* set HIPM first, then DIPM */
994 if (ap->ops->enable_pm)
995 rc = ap->ops->enable_pm(ap, policy);
996 if (rc)
997 goto enable_pm_out;
998 rc = ata_dev_set_dipm(dev, policy);
999
1000 enable_pm_out:
1001 if (rc)
1002 ap->pm_policy = MAX_PERFORMANCE;
1003 else
1004 ap->pm_policy = policy;
1005 return /* rc */; /* hopefully we can use 'rc' eventually */
1006 }
1007
1008 #ifdef CONFIG_PM
1009 /**
1010 * ata_dev_disable_pm - disable SATA interface power management
1011 * @dev: device to disable power management
1012 *
1013 * Disable SATA Interface power management. This will disable
1014 * Device Interface Power Management (DIPM) without changing
1015 * policy, call driver specific callbacks for disabling Host
1016 * Initiated Power management.
1017 *
1018 * Locking: Caller.
1019 * Returns: void
1020 */
1021 static void ata_dev_disable_pm(struct ata_device *dev)
1022 {
1023 struct ata_port *ap = dev->link->ap;
1024
1025 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1026 if (ap->ops->disable_pm)
1027 ap->ops->disable_pm(ap);
1028 }
1029 #endif /* CONFIG_PM */
1030
1031 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1032 {
1033 ap->pm_policy = policy;
1034 ap->link.eh_info.action |= ATA_EH_LPM;
1035 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1036 ata_port_schedule_eh(ap);
1037 }
1038
1039 #ifdef CONFIG_PM
1040 static void ata_lpm_enable(struct ata_host *host)
1041 {
1042 struct ata_link *link;
1043 struct ata_port *ap;
1044 struct ata_device *dev;
1045 int i;
1046
1047 for (i = 0; i < host->n_ports; i++) {
1048 ap = host->ports[i];
1049 ata_port_for_each_link(link, ap) {
1050 ata_link_for_each_dev(dev, link)
1051 ata_dev_disable_pm(dev);
1052 }
1053 }
1054 }
1055
1056 static void ata_lpm_disable(struct ata_host *host)
1057 {
1058 int i;
1059
1060 for (i = 0; i < host->n_ports; i++) {
1061 struct ata_port *ap = host->ports[i];
1062 ata_lpm_schedule(ap, ap->pm_policy);
1063 }
1064 }
1065 #endif /* CONFIG_PM */
1066
1067 /**
1068 * ata_dev_classify - determine device type based on ATA-spec signature
1069 * @tf: ATA taskfile register set for device to be identified
1070 *
1071 * Determine from taskfile register contents whether a device is
1072 * ATA or ATAPI, as per "Signature and persistence" section
1073 * of ATA/PI spec (volume 1, sect 5.14).
1074 *
1075 * LOCKING:
1076 * None.
1077 *
1078 * RETURNS:
1079 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1080 * %ATA_DEV_UNKNOWN the event of failure.
1081 */
1082 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1083 {
1084 /* Apple's open source Darwin code hints that some devices only
1085 * put a proper signature into the LBA mid/high registers,
1086 * So, we only check those. It's sufficient for uniqueness.
1087 *
1088 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1089 * signatures for ATA and ATAPI devices attached on SerialATA,
1090 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1091 * spec has never mentioned about using different signatures
1092 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1093 * Multiplier specification began to use 0x69/0x96 to identify
1094 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1095 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1096 * 0x69/0x96 shortly and described them as reserved for
1097 * SerialATA.
1098 *
1099 * We follow the current spec and consider that 0x69/0x96
1100 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1101 */
1102 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1103 DPRINTK("found ATA device by sig\n");
1104 return ATA_DEV_ATA;
1105 }
1106
1107 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1108 DPRINTK("found ATAPI device by sig\n");
1109 return ATA_DEV_ATAPI;
1110 }
1111
1112 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1113 DPRINTK("found PMP device by sig\n");
1114 return ATA_DEV_PMP;
1115 }
1116
1117 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1118 printk(KERN_INFO "ata: SEMB device ignored\n");
1119 return ATA_DEV_SEMB_UNSUP; /* not yet */
1120 }
1121
1122 DPRINTK("unknown device\n");
1123 return ATA_DEV_UNKNOWN;
1124 }
1125
1126 /**
1127 * ata_id_string - Convert IDENTIFY DEVICE page into string
1128 * @id: IDENTIFY DEVICE results we will examine
1129 * @s: string into which data is output
1130 * @ofs: offset into identify device page
1131 * @len: length of string to return. must be an even number.
1132 *
1133 * The strings in the IDENTIFY DEVICE page are broken up into
1134 * 16-bit chunks. Run through the string, and output each
1135 * 8-bit chunk linearly, regardless of platform.
1136 *
1137 * LOCKING:
1138 * caller.
1139 */
1140
1141 void ata_id_string(const u16 *id, unsigned char *s,
1142 unsigned int ofs, unsigned int len)
1143 {
1144 unsigned int c;
1145
1146 while (len > 0) {
1147 c = id[ofs] >> 8;
1148 *s = c;
1149 s++;
1150
1151 c = id[ofs] & 0xff;
1152 *s = c;
1153 s++;
1154
1155 ofs++;
1156 len -= 2;
1157 }
1158 }
1159
1160 /**
1161 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1162 * @id: IDENTIFY DEVICE results we will examine
1163 * @s: string into which data is output
1164 * @ofs: offset into identify device page
1165 * @len: length of string to return. must be an odd number.
1166 *
1167 * This function is identical to ata_id_string except that it
1168 * trims trailing spaces and terminates the resulting string with
1169 * null. @len must be actual maximum length (even number) + 1.
1170 *
1171 * LOCKING:
1172 * caller.
1173 */
1174 void ata_id_c_string(const u16 *id, unsigned char *s,
1175 unsigned int ofs, unsigned int len)
1176 {
1177 unsigned char *p;
1178
1179 WARN_ON(!(len & 1));
1180
1181 ata_id_string(id, s, ofs, len - 1);
1182
1183 p = s + strnlen(s, len - 1);
1184 while (p > s && p[-1] == ' ')
1185 p--;
1186 *p = '\0';
1187 }
1188
1189 static u64 ata_id_n_sectors(const u16 *id)
1190 {
1191 if (ata_id_has_lba(id)) {
1192 if (ata_id_has_lba48(id))
1193 return ata_id_u64(id, 100);
1194 else
1195 return ata_id_u32(id, 60);
1196 } else {
1197 if (ata_id_current_chs_valid(id))
1198 return ata_id_u32(id, 57);
1199 else
1200 return id[1] * id[3] * id[6];
1201 }
1202 }
1203
1204 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1205 {
1206 u64 sectors = 0;
1207
1208 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1209 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1210 sectors |= (tf->hob_lbal & 0xff) << 24;
1211 sectors |= (tf->lbah & 0xff) << 16;
1212 sectors |= (tf->lbam & 0xff) << 8;
1213 sectors |= (tf->lbal & 0xff);
1214
1215 return sectors;
1216 }
1217
1218 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1219 {
1220 u64 sectors = 0;
1221
1222 sectors |= (tf->device & 0x0f) << 24;
1223 sectors |= (tf->lbah & 0xff) << 16;
1224 sectors |= (tf->lbam & 0xff) << 8;
1225 sectors |= (tf->lbal & 0xff);
1226
1227 return sectors;
1228 }
1229
1230 /**
1231 * ata_read_native_max_address - Read native max address
1232 * @dev: target device
1233 * @max_sectors: out parameter for the result native max address
1234 *
1235 * Perform an LBA48 or LBA28 native size query upon the device in
1236 * question.
1237 *
1238 * RETURNS:
1239 * 0 on success, -EACCES if command is aborted by the drive.
1240 * -EIO on other errors.
1241 */
1242 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1243 {
1244 unsigned int err_mask;
1245 struct ata_taskfile tf;
1246 int lba48 = ata_id_has_lba48(dev->id);
1247
1248 ata_tf_init(dev, &tf);
1249
1250 /* always clear all address registers */
1251 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1252
1253 if (lba48) {
1254 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1255 tf.flags |= ATA_TFLAG_LBA48;
1256 } else
1257 tf.command = ATA_CMD_READ_NATIVE_MAX;
1258
1259 tf.protocol |= ATA_PROT_NODATA;
1260 tf.device |= ATA_LBA;
1261
1262 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1263 if (err_mask) {
1264 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1265 "max address (err_mask=0x%x)\n", err_mask);
1266 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1267 return -EACCES;
1268 return -EIO;
1269 }
1270
1271 if (lba48)
1272 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1273 else
1274 *max_sectors = ata_tf_to_lba(&tf) + 1;
1275 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1276 (*max_sectors)--;
1277 return 0;
1278 }
1279
1280 /**
1281 * ata_set_max_sectors - Set max sectors
1282 * @dev: target device
1283 * @new_sectors: new max sectors value to set for the device
1284 *
1285 * Set max sectors of @dev to @new_sectors.
1286 *
1287 * RETURNS:
1288 * 0 on success, -EACCES if command is aborted or denied (due to
1289 * previous non-volatile SET_MAX) by the drive. -EIO on other
1290 * errors.
1291 */
1292 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1293 {
1294 unsigned int err_mask;
1295 struct ata_taskfile tf;
1296 int lba48 = ata_id_has_lba48(dev->id);
1297
1298 new_sectors--;
1299
1300 ata_tf_init(dev, &tf);
1301
1302 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1303
1304 if (lba48) {
1305 tf.command = ATA_CMD_SET_MAX_EXT;
1306 tf.flags |= ATA_TFLAG_LBA48;
1307
1308 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1309 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1310 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1311 } else {
1312 tf.command = ATA_CMD_SET_MAX;
1313
1314 tf.device |= (new_sectors >> 24) & 0xf;
1315 }
1316
1317 tf.protocol |= ATA_PROT_NODATA;
1318 tf.device |= ATA_LBA;
1319
1320 tf.lbal = (new_sectors >> 0) & 0xff;
1321 tf.lbam = (new_sectors >> 8) & 0xff;
1322 tf.lbah = (new_sectors >> 16) & 0xff;
1323
1324 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1325 if (err_mask) {
1326 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1327 "max address (err_mask=0x%x)\n", err_mask);
1328 if (err_mask == AC_ERR_DEV &&
1329 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1330 return -EACCES;
1331 return -EIO;
1332 }
1333
1334 return 0;
1335 }
1336
1337 /**
1338 * ata_hpa_resize - Resize a device with an HPA set
1339 * @dev: Device to resize
1340 *
1341 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1342 * it if required to the full size of the media. The caller must check
1343 * the drive has the HPA feature set enabled.
1344 *
1345 * RETURNS:
1346 * 0 on success, -errno on failure.
1347 */
1348 static int ata_hpa_resize(struct ata_device *dev)
1349 {
1350 struct ata_eh_context *ehc = &dev->link->eh_context;
1351 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1352 u64 sectors = ata_id_n_sectors(dev->id);
1353 u64 native_sectors;
1354 int rc;
1355
1356 /* do we need to do it? */
1357 if (dev->class != ATA_DEV_ATA ||
1358 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1359 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1360 return 0;
1361
1362 /* read native max address */
1363 rc = ata_read_native_max_address(dev, &native_sectors);
1364 if (rc) {
1365 /* If device aborted the command or HPA isn't going to
1366 * be unlocked, skip HPA resizing.
1367 */
1368 if (rc == -EACCES || !ata_ignore_hpa) {
1369 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1370 "broken, skipping HPA handling\n");
1371 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1372
1373 /* we can continue if device aborted the command */
1374 if (rc == -EACCES)
1375 rc = 0;
1376 }
1377
1378 return rc;
1379 }
1380
1381 /* nothing to do? */
1382 if (native_sectors <= sectors || !ata_ignore_hpa) {
1383 if (!print_info || native_sectors == sectors)
1384 return 0;
1385
1386 if (native_sectors > sectors)
1387 ata_dev_printk(dev, KERN_INFO,
1388 "HPA detected: current %llu, native %llu\n",
1389 (unsigned long long)sectors,
1390 (unsigned long long)native_sectors);
1391 else if (native_sectors < sectors)
1392 ata_dev_printk(dev, KERN_WARNING,
1393 "native sectors (%llu) is smaller than "
1394 "sectors (%llu)\n",
1395 (unsigned long long)native_sectors,
1396 (unsigned long long)sectors);
1397 return 0;
1398 }
1399
1400 /* let's unlock HPA */
1401 rc = ata_set_max_sectors(dev, native_sectors);
1402 if (rc == -EACCES) {
1403 /* if device aborted the command, skip HPA resizing */
1404 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1405 "(%llu -> %llu), skipping HPA handling\n",
1406 (unsigned long long)sectors,
1407 (unsigned long long)native_sectors);
1408 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1409 return 0;
1410 } else if (rc)
1411 return rc;
1412
1413 /* re-read IDENTIFY data */
1414 rc = ata_dev_reread_id(dev, 0);
1415 if (rc) {
1416 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1417 "data after HPA resizing\n");
1418 return rc;
1419 }
1420
1421 if (print_info) {
1422 u64 new_sectors = ata_id_n_sectors(dev->id);
1423 ata_dev_printk(dev, KERN_INFO,
1424 "HPA unlocked: %llu -> %llu, native %llu\n",
1425 (unsigned long long)sectors,
1426 (unsigned long long)new_sectors,
1427 (unsigned long long)native_sectors);
1428 }
1429
1430 return 0;
1431 }
1432
1433 /**
1434 * ata_dump_id - IDENTIFY DEVICE info debugging output
1435 * @id: IDENTIFY DEVICE page to dump
1436 *
1437 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1438 * page.
1439 *
1440 * LOCKING:
1441 * caller.
1442 */
1443
1444 static inline void ata_dump_id(const u16 *id)
1445 {
1446 DPRINTK("49==0x%04x "
1447 "53==0x%04x "
1448 "63==0x%04x "
1449 "64==0x%04x "
1450 "75==0x%04x \n",
1451 id[49],
1452 id[53],
1453 id[63],
1454 id[64],
1455 id[75]);
1456 DPRINTK("80==0x%04x "
1457 "81==0x%04x "
1458 "82==0x%04x "
1459 "83==0x%04x "
1460 "84==0x%04x \n",
1461 id[80],
1462 id[81],
1463 id[82],
1464 id[83],
1465 id[84]);
1466 DPRINTK("88==0x%04x "
1467 "93==0x%04x\n",
1468 id[88],
1469 id[93]);
1470 }
1471
1472 /**
1473 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1474 * @id: IDENTIFY data to compute xfer mask from
1475 *
1476 * Compute the xfermask for this device. This is not as trivial
1477 * as it seems if we must consider early devices correctly.
1478 *
1479 * FIXME: pre IDE drive timing (do we care ?).
1480 *
1481 * LOCKING:
1482 * None.
1483 *
1484 * RETURNS:
1485 * Computed xfermask
1486 */
1487 unsigned long ata_id_xfermask(const u16 *id)
1488 {
1489 unsigned long pio_mask, mwdma_mask, udma_mask;
1490
1491 /* Usual case. Word 53 indicates word 64 is valid */
1492 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1493 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1494 pio_mask <<= 3;
1495 pio_mask |= 0x7;
1496 } else {
1497 /* If word 64 isn't valid then Word 51 high byte holds
1498 * the PIO timing number for the maximum. Turn it into
1499 * a mask.
1500 */
1501 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1502 if (mode < 5) /* Valid PIO range */
1503 pio_mask = (2 << mode) - 1;
1504 else
1505 pio_mask = 1;
1506
1507 /* But wait.. there's more. Design your standards by
1508 * committee and you too can get a free iordy field to
1509 * process. However its the speeds not the modes that
1510 * are supported... Note drivers using the timing API
1511 * will get this right anyway
1512 */
1513 }
1514
1515 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1516
1517 if (ata_id_is_cfa(id)) {
1518 /*
1519 * Process compact flash extended modes
1520 */
1521 int pio = id[163] & 0x7;
1522 int dma = (id[163] >> 3) & 7;
1523
1524 if (pio)
1525 pio_mask |= (1 << 5);
1526 if (pio > 1)
1527 pio_mask |= (1 << 6);
1528 if (dma)
1529 mwdma_mask |= (1 << 3);
1530 if (dma > 1)
1531 mwdma_mask |= (1 << 4);
1532 }
1533
1534 udma_mask = 0;
1535 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1536 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1537
1538 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1539 }
1540
1541 /**
1542 * ata_pio_queue_task - Queue port_task
1543 * @ap: The ata_port to queue port_task for
1544 * @fn: workqueue function to be scheduled
1545 * @data: data for @fn to use
1546 * @delay: delay time for workqueue function
1547 *
1548 * Schedule @fn(@data) for execution after @delay jiffies using
1549 * port_task. There is one port_task per port and it's the
1550 * user(low level driver)'s responsibility to make sure that only
1551 * one task is active at any given time.
1552 *
1553 * libata core layer takes care of synchronization between
1554 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1555 * synchronization.
1556 *
1557 * LOCKING:
1558 * Inherited from caller.
1559 */
1560 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1561 {
1562 ap->port_task_data = data;
1563
1564 /* may fail if ata_port_flush_task() in progress */
1565 queue_delayed_work(ata_wq, &ap->port_task, delay);
1566 }
1567
1568 /**
1569 * ata_port_flush_task - Flush port_task
1570 * @ap: The ata_port to flush port_task for
1571 *
1572 * After this function completes, port_task is guranteed not to
1573 * be running or scheduled.
1574 *
1575 * LOCKING:
1576 * Kernel thread context (may sleep)
1577 */
1578 void ata_port_flush_task(struct ata_port *ap)
1579 {
1580 DPRINTK("ENTER\n");
1581
1582 cancel_rearming_delayed_work(&ap->port_task);
1583
1584 if (ata_msg_ctl(ap))
1585 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1586 }
1587
1588 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1589 {
1590 struct completion *waiting = qc->private_data;
1591
1592 complete(waiting);
1593 }
1594
1595 /**
1596 * ata_exec_internal_sg - execute libata internal command
1597 * @dev: Device to which the command is sent
1598 * @tf: Taskfile registers for the command and the result
1599 * @cdb: CDB for packet command
1600 * @dma_dir: Data tranfer direction of the command
1601 * @sgl: sg list for the data buffer of the command
1602 * @n_elem: Number of sg entries
1603 * @timeout: Timeout in msecs (0 for default)
1604 *
1605 * Executes libata internal command with timeout. @tf contains
1606 * command on entry and result on return. Timeout and error
1607 * conditions are reported via return value. No recovery action
1608 * is taken after a command times out. It's caller's duty to
1609 * clean up after timeout.
1610 *
1611 * LOCKING:
1612 * None. Should be called with kernel context, might sleep.
1613 *
1614 * RETURNS:
1615 * Zero on success, AC_ERR_* mask on failure
1616 */
1617 unsigned ata_exec_internal_sg(struct ata_device *dev,
1618 struct ata_taskfile *tf, const u8 *cdb,
1619 int dma_dir, struct scatterlist *sgl,
1620 unsigned int n_elem, unsigned long timeout)
1621 {
1622 struct ata_link *link = dev->link;
1623 struct ata_port *ap = link->ap;
1624 u8 command = tf->command;
1625 struct ata_queued_cmd *qc;
1626 unsigned int tag, preempted_tag;
1627 u32 preempted_sactive, preempted_qc_active;
1628 int preempted_nr_active_links;
1629 DECLARE_COMPLETION_ONSTACK(wait);
1630 unsigned long flags;
1631 unsigned int err_mask;
1632 int rc;
1633
1634 spin_lock_irqsave(ap->lock, flags);
1635
1636 /* no internal command while frozen */
1637 if (ap->pflags & ATA_PFLAG_FROZEN) {
1638 spin_unlock_irqrestore(ap->lock, flags);
1639 return AC_ERR_SYSTEM;
1640 }
1641
1642 /* initialize internal qc */
1643
1644 /* XXX: Tag 0 is used for drivers with legacy EH as some
1645 * drivers choke if any other tag is given. This breaks
1646 * ata_tag_internal() test for those drivers. Don't use new
1647 * EH stuff without converting to it.
1648 */
1649 if (ap->ops->error_handler)
1650 tag = ATA_TAG_INTERNAL;
1651 else
1652 tag = 0;
1653
1654 if (test_and_set_bit(tag, &ap->qc_allocated))
1655 BUG();
1656 qc = __ata_qc_from_tag(ap, tag);
1657
1658 qc->tag = tag;
1659 qc->scsicmd = NULL;
1660 qc->ap = ap;
1661 qc->dev = dev;
1662 ata_qc_reinit(qc);
1663
1664 preempted_tag = link->active_tag;
1665 preempted_sactive = link->sactive;
1666 preempted_qc_active = ap->qc_active;
1667 preempted_nr_active_links = ap->nr_active_links;
1668 link->active_tag = ATA_TAG_POISON;
1669 link->sactive = 0;
1670 ap->qc_active = 0;
1671 ap->nr_active_links = 0;
1672
1673 /* prepare & issue qc */
1674 qc->tf = *tf;
1675 if (cdb)
1676 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1677 qc->flags |= ATA_QCFLAG_RESULT_TF;
1678 qc->dma_dir = dma_dir;
1679 if (dma_dir != DMA_NONE) {
1680 unsigned int i, buflen = 0;
1681 struct scatterlist *sg;
1682
1683 for_each_sg(sgl, sg, n_elem, i)
1684 buflen += sg->length;
1685
1686 ata_sg_init(qc, sgl, n_elem);
1687 qc->nbytes = buflen;
1688 }
1689
1690 qc->private_data = &wait;
1691 qc->complete_fn = ata_qc_complete_internal;
1692
1693 ata_qc_issue(qc);
1694
1695 spin_unlock_irqrestore(ap->lock, flags);
1696
1697 if (!timeout)
1698 timeout = ata_probe_timeout * 1000 / HZ;
1699
1700 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1701
1702 ata_port_flush_task(ap);
1703
1704 if (!rc) {
1705 spin_lock_irqsave(ap->lock, flags);
1706
1707 /* We're racing with irq here. If we lose, the
1708 * following test prevents us from completing the qc
1709 * twice. If we win, the port is frozen and will be
1710 * cleaned up by ->post_internal_cmd().
1711 */
1712 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1713 qc->err_mask |= AC_ERR_TIMEOUT;
1714
1715 if (ap->ops->error_handler)
1716 ata_port_freeze(ap);
1717 else
1718 ata_qc_complete(qc);
1719
1720 if (ata_msg_warn(ap))
1721 ata_dev_printk(dev, KERN_WARNING,
1722 "qc timeout (cmd 0x%x)\n", command);
1723 }
1724
1725 spin_unlock_irqrestore(ap->lock, flags);
1726 }
1727
1728 /* do post_internal_cmd */
1729 if (ap->ops->post_internal_cmd)
1730 ap->ops->post_internal_cmd(qc);
1731
1732 /* perform minimal error analysis */
1733 if (qc->flags & ATA_QCFLAG_FAILED) {
1734 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1735 qc->err_mask |= AC_ERR_DEV;
1736
1737 if (!qc->err_mask)
1738 qc->err_mask |= AC_ERR_OTHER;
1739
1740 if (qc->err_mask & ~AC_ERR_OTHER)
1741 qc->err_mask &= ~AC_ERR_OTHER;
1742 }
1743
1744 /* finish up */
1745 spin_lock_irqsave(ap->lock, flags);
1746
1747 *tf = qc->result_tf;
1748 err_mask = qc->err_mask;
1749
1750 ata_qc_free(qc);
1751 link->active_tag = preempted_tag;
1752 link->sactive = preempted_sactive;
1753 ap->qc_active = preempted_qc_active;
1754 ap->nr_active_links = preempted_nr_active_links;
1755
1756 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1757 * Until those drivers are fixed, we detect the condition
1758 * here, fail the command with AC_ERR_SYSTEM and reenable the
1759 * port.
1760 *
1761 * Note that this doesn't change any behavior as internal
1762 * command failure results in disabling the device in the
1763 * higher layer for LLDDs without new reset/EH callbacks.
1764 *
1765 * Kill the following code as soon as those drivers are fixed.
1766 */
1767 if (ap->flags & ATA_FLAG_DISABLED) {
1768 err_mask |= AC_ERR_SYSTEM;
1769 ata_port_probe(ap);
1770 }
1771
1772 spin_unlock_irqrestore(ap->lock, flags);
1773
1774 return err_mask;
1775 }
1776
1777 /**
1778 * ata_exec_internal - execute libata internal command
1779 * @dev: Device to which the command is sent
1780 * @tf: Taskfile registers for the command and the result
1781 * @cdb: CDB for packet command
1782 * @dma_dir: Data tranfer direction of the command
1783 * @buf: Data buffer of the command
1784 * @buflen: Length of data buffer
1785 * @timeout: Timeout in msecs (0 for default)
1786 *
1787 * Wrapper around ata_exec_internal_sg() which takes simple
1788 * buffer instead of sg list.
1789 *
1790 * LOCKING:
1791 * None. Should be called with kernel context, might sleep.
1792 *
1793 * RETURNS:
1794 * Zero on success, AC_ERR_* mask on failure
1795 */
1796 unsigned ata_exec_internal(struct ata_device *dev,
1797 struct ata_taskfile *tf, const u8 *cdb,
1798 int dma_dir, void *buf, unsigned int buflen,
1799 unsigned long timeout)
1800 {
1801 struct scatterlist *psg = NULL, sg;
1802 unsigned int n_elem = 0;
1803
1804 if (dma_dir != DMA_NONE) {
1805 WARN_ON(!buf);
1806 sg_init_one(&sg, buf, buflen);
1807 psg = &sg;
1808 n_elem++;
1809 }
1810
1811 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1812 timeout);
1813 }
1814
1815 /**
1816 * ata_do_simple_cmd - execute simple internal command
1817 * @dev: Device to which the command is sent
1818 * @cmd: Opcode to execute
1819 *
1820 * Execute a 'simple' command, that only consists of the opcode
1821 * 'cmd' itself, without filling any other registers
1822 *
1823 * LOCKING:
1824 * Kernel thread context (may sleep).
1825 *
1826 * RETURNS:
1827 * Zero on success, AC_ERR_* mask on failure
1828 */
1829 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1830 {
1831 struct ata_taskfile tf;
1832
1833 ata_tf_init(dev, &tf);
1834
1835 tf.command = cmd;
1836 tf.flags |= ATA_TFLAG_DEVICE;
1837 tf.protocol = ATA_PROT_NODATA;
1838
1839 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1840 }
1841
1842 /**
1843 * ata_pio_need_iordy - check if iordy needed
1844 * @adev: ATA device
1845 *
1846 * Check if the current speed of the device requires IORDY. Used
1847 * by various controllers for chip configuration.
1848 */
1849
1850 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1851 {
1852 /* Controller doesn't support IORDY. Probably a pointless check
1853 as the caller should know this */
1854 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1855 return 0;
1856 /* PIO3 and higher it is mandatory */
1857 if (adev->pio_mode > XFER_PIO_2)
1858 return 1;
1859 /* We turn it on when possible */
1860 if (ata_id_has_iordy(adev->id))
1861 return 1;
1862 return 0;
1863 }
1864
1865 /**
1866 * ata_pio_mask_no_iordy - Return the non IORDY mask
1867 * @adev: ATA device
1868 *
1869 * Compute the highest mode possible if we are not using iordy. Return
1870 * -1 if no iordy mode is available.
1871 */
1872
1873 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1874 {
1875 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1876 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1877 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1878 /* Is the speed faster than the drive allows non IORDY ? */
1879 if (pio) {
1880 /* This is cycle times not frequency - watch the logic! */
1881 if (pio > 240) /* PIO2 is 240nS per cycle */
1882 return 3 << ATA_SHIFT_PIO;
1883 return 7 << ATA_SHIFT_PIO;
1884 }
1885 }
1886 return 3 << ATA_SHIFT_PIO;
1887 }
1888
1889 /**
1890 * ata_dev_read_id - Read ID data from the specified device
1891 * @dev: target device
1892 * @p_class: pointer to class of the target device (may be changed)
1893 * @flags: ATA_READID_* flags
1894 * @id: buffer to read IDENTIFY data into
1895 *
1896 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1897 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1898 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1899 * for pre-ATA4 drives.
1900 *
1901 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1902 * now we abort if we hit that case.
1903 *
1904 * LOCKING:
1905 * Kernel thread context (may sleep)
1906 *
1907 * RETURNS:
1908 * 0 on success, -errno otherwise.
1909 */
1910 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1911 unsigned int flags, u16 *id)
1912 {
1913 struct ata_port *ap = dev->link->ap;
1914 unsigned int class = *p_class;
1915 struct ata_taskfile tf;
1916 unsigned int err_mask = 0;
1917 const char *reason;
1918 int may_fallback = 1, tried_spinup = 0;
1919 int rc;
1920
1921 if (ata_msg_ctl(ap))
1922 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1923
1924 retry:
1925 ata_tf_init(dev, &tf);
1926
1927 switch (class) {
1928 case ATA_DEV_ATA:
1929 tf.command = ATA_CMD_ID_ATA;
1930 break;
1931 case ATA_DEV_ATAPI:
1932 tf.command = ATA_CMD_ID_ATAPI;
1933 break;
1934 default:
1935 rc = -ENODEV;
1936 reason = "unsupported class";
1937 goto err_out;
1938 }
1939
1940 tf.protocol = ATA_PROT_PIO;
1941
1942 /* Some devices choke if TF registers contain garbage. Make
1943 * sure those are properly initialized.
1944 */
1945 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1946
1947 /* Device presence detection is unreliable on some
1948 * controllers. Always poll IDENTIFY if available.
1949 */
1950 tf.flags |= ATA_TFLAG_POLLING;
1951
1952 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1953 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1954 if (err_mask) {
1955 if (err_mask & AC_ERR_NODEV_HINT) {
1956 ata_dev_printk(dev, KERN_DEBUG,
1957 "NODEV after polling detection\n");
1958 return -ENOENT;
1959 }
1960
1961 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1962 /* Device or controller might have reported
1963 * the wrong device class. Give a shot at the
1964 * other IDENTIFY if the current one is
1965 * aborted by the device.
1966 */
1967 if (may_fallback) {
1968 may_fallback = 0;
1969
1970 if (class == ATA_DEV_ATA)
1971 class = ATA_DEV_ATAPI;
1972 else
1973 class = ATA_DEV_ATA;
1974 goto retry;
1975 }
1976
1977 /* Control reaches here iff the device aborted
1978 * both flavors of IDENTIFYs which happens
1979 * sometimes with phantom devices.
1980 */
1981 ata_dev_printk(dev, KERN_DEBUG,
1982 "both IDENTIFYs aborted, assuming NODEV\n");
1983 return -ENOENT;
1984 }
1985
1986 rc = -EIO;
1987 reason = "I/O error";
1988 goto err_out;
1989 }
1990
1991 /* Falling back doesn't make sense if ID data was read
1992 * successfully at least once.
1993 */
1994 may_fallback = 0;
1995
1996 swap_buf_le16(id, ATA_ID_WORDS);
1997
1998 /* sanity check */
1999 rc = -EINVAL;
2000 reason = "device reports invalid type";
2001
2002 if (class == ATA_DEV_ATA) {
2003 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2004 goto err_out;
2005 } else {
2006 if (ata_id_is_ata(id))
2007 goto err_out;
2008 }
2009
2010 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2011 tried_spinup = 1;
2012 /*
2013 * Drive powered-up in standby mode, and requires a specific
2014 * SET_FEATURES spin-up subcommand before it will accept
2015 * anything other than the original IDENTIFY command.
2016 */
2017 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2018 if (err_mask && id[2] != 0x738c) {
2019 rc = -EIO;
2020 reason = "SPINUP failed";
2021 goto err_out;
2022 }
2023 /*
2024 * If the drive initially returned incomplete IDENTIFY info,
2025 * we now must reissue the IDENTIFY command.
2026 */
2027 if (id[2] == 0x37c8)
2028 goto retry;
2029 }
2030
2031 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2032 /*
2033 * The exact sequence expected by certain pre-ATA4 drives is:
2034 * SRST RESET
2035 * IDENTIFY (optional in early ATA)
2036 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2037 * anything else..
2038 * Some drives were very specific about that exact sequence.
2039 *
2040 * Note that ATA4 says lba is mandatory so the second check
2041 * shoud never trigger.
2042 */
2043 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2044 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2045 if (err_mask) {
2046 rc = -EIO;
2047 reason = "INIT_DEV_PARAMS failed";
2048 goto err_out;
2049 }
2050
2051 /* current CHS translation info (id[53-58]) might be
2052 * changed. reread the identify device info.
2053 */
2054 flags &= ~ATA_READID_POSTRESET;
2055 goto retry;
2056 }
2057 }
2058
2059 *p_class = class;
2060
2061 return 0;
2062
2063 err_out:
2064 if (ata_msg_warn(ap))
2065 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2066 "(%s, err_mask=0x%x)\n", reason, err_mask);
2067 return rc;
2068 }
2069
2070 static inline u8 ata_dev_knobble(struct ata_device *dev)
2071 {
2072 struct ata_port *ap = dev->link->ap;
2073 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2074 }
2075
2076 static void ata_dev_config_ncq(struct ata_device *dev,
2077 char *desc, size_t desc_sz)
2078 {
2079 struct ata_port *ap = dev->link->ap;
2080 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2081
2082 if (!ata_id_has_ncq(dev->id)) {
2083 desc[0] = '\0';
2084 return;
2085 }
2086 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2087 snprintf(desc, desc_sz, "NCQ (not used)");
2088 return;
2089 }
2090 if (ap->flags & ATA_FLAG_NCQ) {
2091 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2092 dev->flags |= ATA_DFLAG_NCQ;
2093 }
2094
2095 if (hdepth >= ddepth)
2096 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2097 else
2098 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2099 }
2100
2101 /**
2102 * ata_dev_configure - Configure the specified ATA/ATAPI device
2103 * @dev: Target device to configure
2104 *
2105 * Configure @dev according to @dev->id. Generic and low-level
2106 * driver specific fixups are also applied.
2107 *
2108 * LOCKING:
2109 * Kernel thread context (may sleep)
2110 *
2111 * RETURNS:
2112 * 0 on success, -errno otherwise
2113 */
2114 int ata_dev_configure(struct ata_device *dev)
2115 {
2116 struct ata_port *ap = dev->link->ap;
2117 struct ata_eh_context *ehc = &dev->link->eh_context;
2118 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2119 const u16 *id = dev->id;
2120 unsigned long xfer_mask;
2121 char revbuf[7]; /* XYZ-99\0 */
2122 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2123 char modelbuf[ATA_ID_PROD_LEN+1];
2124 int rc;
2125
2126 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2127 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2128 __func__);
2129 return 0;
2130 }
2131
2132 if (ata_msg_probe(ap))
2133 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2134
2135 /* set horkage */
2136 dev->horkage |= ata_dev_blacklisted(dev);
2137 ata_force_horkage(dev);
2138
2139 /* let ACPI work its magic */
2140 rc = ata_acpi_on_devcfg(dev);
2141 if (rc)
2142 return rc;
2143
2144 /* massage HPA, do it early as it might change IDENTIFY data */
2145 rc = ata_hpa_resize(dev);
2146 if (rc)
2147 return rc;
2148
2149 /* print device capabilities */
2150 if (ata_msg_probe(ap))
2151 ata_dev_printk(dev, KERN_DEBUG,
2152 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2153 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2154 __func__,
2155 id[49], id[82], id[83], id[84],
2156 id[85], id[86], id[87], id[88]);
2157
2158 /* initialize to-be-configured parameters */
2159 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2160 dev->max_sectors = 0;
2161 dev->cdb_len = 0;
2162 dev->n_sectors = 0;
2163 dev->cylinders = 0;
2164 dev->heads = 0;
2165 dev->sectors = 0;
2166
2167 /*
2168 * common ATA, ATAPI feature tests
2169 */
2170
2171 /* find max transfer mode; for printk only */
2172 xfer_mask = ata_id_xfermask(id);
2173
2174 if (ata_msg_probe(ap))
2175 ata_dump_id(id);
2176
2177 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2178 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2179 sizeof(fwrevbuf));
2180
2181 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2182 sizeof(modelbuf));
2183
2184 /* ATA-specific feature tests */
2185 if (dev->class == ATA_DEV_ATA) {
2186 if (ata_id_is_cfa(id)) {
2187 if (id[162] & 1) /* CPRM may make this media unusable */
2188 ata_dev_printk(dev, KERN_WARNING,
2189 "supports DRM functions and may "
2190 "not be fully accessable.\n");
2191 snprintf(revbuf, 7, "CFA");
2192 } else {
2193 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2194 /* Warn the user if the device has TPM extensions */
2195 if (ata_id_has_tpm(id))
2196 ata_dev_printk(dev, KERN_WARNING,
2197 "supports DRM functions and may "
2198 "not be fully accessable.\n");
2199 }
2200
2201 dev->n_sectors = ata_id_n_sectors(id);
2202
2203 if (dev->id[59] & 0x100)
2204 dev->multi_count = dev->id[59] & 0xff;
2205
2206 if (ata_id_has_lba(id)) {
2207 const char *lba_desc;
2208 char ncq_desc[20];
2209
2210 lba_desc = "LBA";
2211 dev->flags |= ATA_DFLAG_LBA;
2212 if (ata_id_has_lba48(id)) {
2213 dev->flags |= ATA_DFLAG_LBA48;
2214 lba_desc = "LBA48";
2215
2216 if (dev->n_sectors >= (1UL << 28) &&
2217 ata_id_has_flush_ext(id))
2218 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2219 }
2220
2221 /* config NCQ */
2222 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2223
2224 /* print device info to dmesg */
2225 if (ata_msg_drv(ap) && print_info) {
2226 ata_dev_printk(dev, KERN_INFO,
2227 "%s: %s, %s, max %s\n",
2228 revbuf, modelbuf, fwrevbuf,
2229 ata_mode_string(xfer_mask));
2230 ata_dev_printk(dev, KERN_INFO,
2231 "%Lu sectors, multi %u: %s %s\n",
2232 (unsigned long long)dev->n_sectors,
2233 dev->multi_count, lba_desc, ncq_desc);
2234 }
2235 } else {
2236 /* CHS */
2237
2238 /* Default translation */
2239 dev->cylinders = id[1];
2240 dev->heads = id[3];
2241 dev->sectors = id[6];
2242
2243 if (ata_id_current_chs_valid(id)) {
2244 /* Current CHS translation is valid. */
2245 dev->cylinders = id[54];
2246 dev->heads = id[55];
2247 dev->sectors = id[56];
2248 }
2249
2250 /* print device info to dmesg */
2251 if (ata_msg_drv(ap) && print_info) {
2252 ata_dev_printk(dev, KERN_INFO,
2253 "%s: %s, %s, max %s\n",
2254 revbuf, modelbuf, fwrevbuf,
2255 ata_mode_string(xfer_mask));
2256 ata_dev_printk(dev, KERN_INFO,
2257 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2258 (unsigned long long)dev->n_sectors,
2259 dev->multi_count, dev->cylinders,
2260 dev->heads, dev->sectors);
2261 }
2262 }
2263
2264 dev->cdb_len = 16;
2265 }
2266
2267 /* ATAPI-specific feature tests */
2268 else if (dev->class == ATA_DEV_ATAPI) {
2269 const char *cdb_intr_string = "";
2270 const char *atapi_an_string = "";
2271 const char *dma_dir_string = "";
2272 u32 sntf;
2273
2274 rc = atapi_cdb_len(id);
2275 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2276 if (ata_msg_warn(ap))
2277 ata_dev_printk(dev, KERN_WARNING,
2278 "unsupported CDB len\n");
2279 rc = -EINVAL;
2280 goto err_out_nosup;
2281 }
2282 dev->cdb_len = (unsigned int) rc;
2283
2284 /* Enable ATAPI AN if both the host and device have
2285 * the support. If PMP is attached, SNTF is required
2286 * to enable ATAPI AN to discern between PHY status
2287 * changed notifications and ATAPI ANs.
2288 */
2289 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2290 (!ap->nr_pmp_links ||
2291 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2292 unsigned int err_mask;
2293
2294 /* issue SET feature command to turn this on */
2295 err_mask = ata_dev_set_feature(dev,
2296 SETFEATURES_SATA_ENABLE, SATA_AN);
2297 if (err_mask)
2298 ata_dev_printk(dev, KERN_ERR,
2299 "failed to enable ATAPI AN "
2300 "(err_mask=0x%x)\n", err_mask);
2301 else {
2302 dev->flags |= ATA_DFLAG_AN;
2303 atapi_an_string = ", ATAPI AN";
2304 }
2305 }
2306
2307 if (ata_id_cdb_intr(dev->id)) {
2308 dev->flags |= ATA_DFLAG_CDB_INTR;
2309 cdb_intr_string = ", CDB intr";
2310 }
2311
2312 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2313 dev->flags |= ATA_DFLAG_DMADIR;
2314 dma_dir_string = ", DMADIR";
2315 }
2316
2317 /* print device info to dmesg */
2318 if (ata_msg_drv(ap) && print_info)
2319 ata_dev_printk(dev, KERN_INFO,
2320 "ATAPI: %s, %s, max %s%s%s%s\n",
2321 modelbuf, fwrevbuf,
2322 ata_mode_string(xfer_mask),
2323 cdb_intr_string, atapi_an_string,
2324 dma_dir_string);
2325 }
2326
2327 /* determine max_sectors */
2328 dev->max_sectors = ATA_MAX_SECTORS;
2329 if (dev->flags & ATA_DFLAG_LBA48)
2330 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2331
2332 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2333 if (ata_id_has_hipm(dev->id))
2334 dev->flags |= ATA_DFLAG_HIPM;
2335 if (ata_id_has_dipm(dev->id))
2336 dev->flags |= ATA_DFLAG_DIPM;
2337 }
2338
2339 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2340 200 sectors */
2341 if (ata_dev_knobble(dev)) {
2342 if (ata_msg_drv(ap) && print_info)
2343 ata_dev_printk(dev, KERN_INFO,
2344 "applying bridge limits\n");
2345 dev->udma_mask &= ATA_UDMA5;
2346 dev->max_sectors = ATA_MAX_SECTORS;
2347 }
2348
2349 if ((dev->class == ATA_DEV_ATAPI) &&
2350 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2351 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2352 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2353 }
2354
2355 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2356 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2357 dev->max_sectors);
2358
2359 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2360 dev->horkage |= ATA_HORKAGE_IPM;
2361
2362 /* reset link pm_policy for this port to no pm */
2363 ap->pm_policy = MAX_PERFORMANCE;
2364 }
2365
2366 if (ap->ops->dev_config)
2367 ap->ops->dev_config(dev);
2368
2369 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2370 /* Let the user know. We don't want to disallow opens for
2371 rescue purposes, or in case the vendor is just a blithering
2372 idiot. Do this after the dev_config call as some controllers
2373 with buggy firmware may want to avoid reporting false device
2374 bugs */
2375
2376 if (print_info) {
2377 ata_dev_printk(dev, KERN_WARNING,
2378 "Drive reports diagnostics failure. This may indicate a drive\n");
2379 ata_dev_printk(dev, KERN_WARNING,
2380 "fault or invalid emulation. Contact drive vendor for information.\n");
2381 }
2382 }
2383
2384 return 0;
2385
2386 err_out_nosup:
2387 if (ata_msg_probe(ap))
2388 ata_dev_printk(dev, KERN_DEBUG,
2389 "%s: EXIT, err\n", __func__);
2390 return rc;
2391 }
2392
2393 /**
2394 * ata_cable_40wire - return 40 wire cable type
2395 * @ap: port
2396 *
2397 * Helper method for drivers which want to hardwire 40 wire cable
2398 * detection.
2399 */
2400
2401 int ata_cable_40wire(struct ata_port *ap)
2402 {
2403 return ATA_CBL_PATA40;
2404 }
2405
2406 /**
2407 * ata_cable_80wire - return 80 wire cable type
2408 * @ap: port
2409 *
2410 * Helper method for drivers which want to hardwire 80 wire cable
2411 * detection.
2412 */
2413
2414 int ata_cable_80wire(struct ata_port *ap)
2415 {
2416 return ATA_CBL_PATA80;
2417 }
2418
2419 /**
2420 * ata_cable_unknown - return unknown PATA cable.
2421 * @ap: port
2422 *
2423 * Helper method for drivers which have no PATA cable detection.
2424 */
2425
2426 int ata_cable_unknown(struct ata_port *ap)
2427 {
2428 return ATA_CBL_PATA_UNK;
2429 }
2430
2431 /**
2432 * ata_cable_ignore - return ignored PATA cable.
2433 * @ap: port
2434 *
2435 * Helper method for drivers which don't use cable type to limit
2436 * transfer mode.
2437 */
2438 int ata_cable_ignore(struct ata_port *ap)
2439 {
2440 return ATA_CBL_PATA_IGN;
2441 }
2442
2443 /**
2444 * ata_cable_sata - return SATA cable type
2445 * @ap: port
2446 *
2447 * Helper method for drivers which have SATA cables
2448 */
2449
2450 int ata_cable_sata(struct ata_port *ap)
2451 {
2452 return ATA_CBL_SATA;
2453 }
2454
2455 /**
2456 * ata_bus_probe - Reset and probe ATA bus
2457 * @ap: Bus to probe
2458 *
2459 * Master ATA bus probing function. Initiates a hardware-dependent
2460 * bus reset, then attempts to identify any devices found on
2461 * the bus.
2462 *
2463 * LOCKING:
2464 * PCI/etc. bus probe sem.
2465 *
2466 * RETURNS:
2467 * Zero on success, negative errno otherwise.
2468 */
2469
2470 int ata_bus_probe(struct ata_port *ap)
2471 {
2472 unsigned int classes[ATA_MAX_DEVICES];
2473 int tries[ATA_MAX_DEVICES];
2474 int rc;
2475 struct ata_device *dev;
2476
2477 ata_port_probe(ap);
2478
2479 ata_link_for_each_dev(dev, &ap->link)
2480 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2481
2482 retry:
2483 ata_link_for_each_dev(dev, &ap->link) {
2484 /* If we issue an SRST then an ATA drive (not ATAPI)
2485 * may change configuration and be in PIO0 timing. If
2486 * we do a hard reset (or are coming from power on)
2487 * this is true for ATA or ATAPI. Until we've set a
2488 * suitable controller mode we should not touch the
2489 * bus as we may be talking too fast.
2490 */
2491 dev->pio_mode = XFER_PIO_0;
2492
2493 /* If the controller has a pio mode setup function
2494 * then use it to set the chipset to rights. Don't
2495 * touch the DMA setup as that will be dealt with when
2496 * configuring devices.
2497 */
2498 if (ap->ops->set_piomode)
2499 ap->ops->set_piomode(ap, dev);
2500 }
2501
2502 /* reset and determine device classes */
2503 ap->ops->phy_reset(ap);
2504
2505 ata_link_for_each_dev(dev, &ap->link) {
2506 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2507 dev->class != ATA_DEV_UNKNOWN)
2508 classes[dev->devno] = dev->class;
2509 else
2510 classes[dev->devno] = ATA_DEV_NONE;
2511
2512 dev->class = ATA_DEV_UNKNOWN;
2513 }
2514
2515 ata_port_probe(ap);
2516
2517 /* read IDENTIFY page and configure devices. We have to do the identify
2518 specific sequence bass-ackwards so that PDIAG- is released by
2519 the slave device */
2520
2521 ata_link_for_each_dev_reverse(dev, &ap->link) {
2522 if (tries[dev->devno])
2523 dev->class = classes[dev->devno];
2524
2525 if (!ata_dev_enabled(dev))
2526 continue;
2527
2528 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2529 dev->id);
2530 if (rc)
2531 goto fail;
2532 }
2533
2534 /* Now ask for the cable type as PDIAG- should have been released */
2535 if (ap->ops->cable_detect)
2536 ap->cbl = ap->ops->cable_detect(ap);
2537
2538 /* We may have SATA bridge glue hiding here irrespective of the
2539 reported cable types and sensed types */
2540 ata_link_for_each_dev(dev, &ap->link) {
2541 if (!ata_dev_enabled(dev))
2542 continue;
2543 /* SATA drives indicate we have a bridge. We don't know which
2544 end of the link the bridge is which is a problem */
2545 if (ata_id_is_sata(dev->id))
2546 ap->cbl = ATA_CBL_SATA;
2547 }
2548
2549 /* After the identify sequence we can now set up the devices. We do
2550 this in the normal order so that the user doesn't get confused */
2551
2552 ata_link_for_each_dev(dev, &ap->link) {
2553 if (!ata_dev_enabled(dev))
2554 continue;
2555
2556 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2557 rc = ata_dev_configure(dev);
2558 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2559 if (rc)
2560 goto fail;
2561 }
2562
2563 /* configure transfer mode */
2564 rc = ata_set_mode(&ap->link, &dev);
2565 if (rc)
2566 goto fail;
2567
2568 ata_link_for_each_dev(dev, &ap->link)
2569 if (ata_dev_enabled(dev))
2570 return 0;
2571
2572 /* no device present, disable port */
2573 ata_port_disable(ap);
2574 return -ENODEV;
2575
2576 fail:
2577 tries[dev->devno]--;
2578
2579 switch (rc) {
2580 case -EINVAL:
2581 /* eeek, something went very wrong, give up */
2582 tries[dev->devno] = 0;
2583 break;
2584
2585 case -ENODEV:
2586 /* give it just one more chance */
2587 tries[dev->devno] = min(tries[dev->devno], 1);
2588 case -EIO:
2589 if (tries[dev->devno] == 1) {
2590 /* This is the last chance, better to slow
2591 * down than lose it.
2592 */
2593 sata_down_spd_limit(&ap->link);
2594 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2595 }
2596 }
2597
2598 if (!tries[dev->devno])
2599 ata_dev_disable(dev);
2600
2601 goto retry;
2602 }
2603
2604 /**
2605 * ata_port_probe - Mark port as enabled
2606 * @ap: Port for which we indicate enablement
2607 *
2608 * Modify @ap data structure such that the system
2609 * thinks that the entire port is enabled.
2610 *
2611 * LOCKING: host lock, or some other form of
2612 * serialization.
2613 */
2614
2615 void ata_port_probe(struct ata_port *ap)
2616 {
2617 ap->flags &= ~ATA_FLAG_DISABLED;
2618 }
2619
2620 /**
2621 * sata_print_link_status - Print SATA link status
2622 * @link: SATA link to printk link status about
2623 *
2624 * This function prints link speed and status of a SATA link.
2625 *
2626 * LOCKING:
2627 * None.
2628 */
2629 void sata_print_link_status(struct ata_link *link)
2630 {
2631 u32 sstatus, scontrol, tmp;
2632
2633 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2634 return;
2635 sata_scr_read(link, SCR_CONTROL, &scontrol);
2636
2637 if (ata_link_online(link)) {
2638 tmp = (sstatus >> 4) & 0xf;
2639 ata_link_printk(link, KERN_INFO,
2640 "SATA link up %s (SStatus %X SControl %X)\n",
2641 sata_spd_string(tmp), sstatus, scontrol);
2642 } else {
2643 ata_link_printk(link, KERN_INFO,
2644 "SATA link down (SStatus %X SControl %X)\n",
2645 sstatus, scontrol);
2646 }
2647 }
2648
2649 /**
2650 * ata_dev_pair - return other device on cable
2651 * @adev: device
2652 *
2653 * Obtain the other device on the same cable, or if none is
2654 * present NULL is returned
2655 */
2656
2657 struct ata_device *ata_dev_pair(struct ata_device *adev)
2658 {
2659 struct ata_link *link = adev->link;
2660 struct ata_device *pair = &link->device[1 - adev->devno];
2661 if (!ata_dev_enabled(pair))
2662 return NULL;
2663 return pair;
2664 }
2665
2666 /**
2667 * ata_port_disable - Disable port.
2668 * @ap: Port to be disabled.
2669 *
2670 * Modify @ap data structure such that the system
2671 * thinks that the entire port is disabled, and should
2672 * never attempt to probe or communicate with devices
2673 * on this port.
2674 *
2675 * LOCKING: host lock, or some other form of
2676 * serialization.
2677 */
2678
2679 void ata_port_disable(struct ata_port *ap)
2680 {
2681 ap->link.device[0].class = ATA_DEV_NONE;
2682 ap->link.device[1].class = ATA_DEV_NONE;
2683 ap->flags |= ATA_FLAG_DISABLED;
2684 }
2685
2686 /**
2687 * sata_down_spd_limit - adjust SATA spd limit downward
2688 * @link: Link to adjust SATA spd limit for
2689 *
2690 * Adjust SATA spd limit of @link downward. Note that this
2691 * function only adjusts the limit. The change must be applied
2692 * using sata_set_spd().
2693 *
2694 * LOCKING:
2695 * Inherited from caller.
2696 *
2697 * RETURNS:
2698 * 0 on success, negative errno on failure
2699 */
2700 int sata_down_spd_limit(struct ata_link *link)
2701 {
2702 u32 sstatus, spd, mask;
2703 int rc, highbit;
2704
2705 if (!sata_scr_valid(link))
2706 return -EOPNOTSUPP;
2707
2708 /* If SCR can be read, use it to determine the current SPD.
2709 * If not, use cached value in link->sata_spd.
2710 */
2711 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2712 if (rc == 0)
2713 spd = (sstatus >> 4) & 0xf;
2714 else
2715 spd = link->sata_spd;
2716
2717 mask = link->sata_spd_limit;
2718 if (mask <= 1)
2719 return -EINVAL;
2720
2721 /* unconditionally mask off the highest bit */
2722 highbit = fls(mask) - 1;
2723 mask &= ~(1 << highbit);
2724
2725 /* Mask off all speeds higher than or equal to the current
2726 * one. Force 1.5Gbps if current SPD is not available.
2727 */
2728 if (spd > 1)
2729 mask &= (1 << (spd - 1)) - 1;
2730 else
2731 mask &= 1;
2732
2733 /* were we already at the bottom? */
2734 if (!mask)
2735 return -EINVAL;
2736
2737 link->sata_spd_limit = mask;
2738
2739 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2740 sata_spd_string(fls(mask)));
2741
2742 return 0;
2743 }
2744
2745 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2746 {
2747 struct ata_link *host_link = &link->ap->link;
2748 u32 limit, target, spd;
2749
2750 limit = link->sata_spd_limit;
2751
2752 /* Don't configure downstream link faster than upstream link.
2753 * It doesn't speed up anything and some PMPs choke on such
2754 * configuration.
2755 */
2756 if (!ata_is_host_link(link) && host_link->sata_spd)
2757 limit &= (1 << host_link->sata_spd) - 1;
2758
2759 if (limit == UINT_MAX)
2760 target = 0;
2761 else
2762 target = fls(limit);
2763
2764 spd = (*scontrol >> 4) & 0xf;
2765 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2766
2767 return spd != target;
2768 }
2769
2770 /**
2771 * sata_set_spd_needed - is SATA spd configuration needed
2772 * @link: Link in question
2773 *
2774 * Test whether the spd limit in SControl matches
2775 * @link->sata_spd_limit. This function is used to determine
2776 * whether hardreset is necessary to apply SATA spd
2777 * configuration.
2778 *
2779 * LOCKING:
2780 * Inherited from caller.
2781 *
2782 * RETURNS:
2783 * 1 if SATA spd configuration is needed, 0 otherwise.
2784 */
2785 int sata_set_spd_needed(struct ata_link *link)
2786 {
2787 u32 scontrol;
2788
2789 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2790 return 1;
2791
2792 return __sata_set_spd_needed(link, &scontrol);
2793 }
2794
2795 /**
2796 * sata_set_spd - set SATA spd according to spd limit
2797 * @link: Link to set SATA spd for
2798 *
2799 * Set SATA spd of @link according to sata_spd_limit.
2800 *
2801 * LOCKING:
2802 * Inherited from caller.
2803 *
2804 * RETURNS:
2805 * 0 if spd doesn't need to be changed, 1 if spd has been
2806 * changed. Negative errno if SCR registers are inaccessible.
2807 */
2808 int sata_set_spd(struct ata_link *link)
2809 {
2810 u32 scontrol;
2811 int rc;
2812
2813 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2814 return rc;
2815
2816 if (!__sata_set_spd_needed(link, &scontrol))
2817 return 0;
2818
2819 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2820 return rc;
2821
2822 return 1;
2823 }
2824
2825 /*
2826 * This mode timing computation functionality is ported over from
2827 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2828 */
2829 /*
2830 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2831 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2832 * for UDMA6, which is currently supported only by Maxtor drives.
2833 *
2834 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2835 */
2836
2837 static const struct ata_timing ata_timing[] = {
2838 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2839 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2840 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2841 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2842 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2843 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2844 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2845 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2846
2847 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2848 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2849 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2850
2851 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2852 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2853 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2854 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2855 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2856
2857 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2858 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2859 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2860 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2861 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2862 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2863 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2864 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2865
2866 { 0xFF }
2867 };
2868
2869 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2870 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2871
2872 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2873 {
2874 q->setup = EZ(t->setup * 1000, T);
2875 q->act8b = EZ(t->act8b * 1000, T);
2876 q->rec8b = EZ(t->rec8b * 1000, T);
2877 q->cyc8b = EZ(t->cyc8b * 1000, T);
2878 q->active = EZ(t->active * 1000, T);
2879 q->recover = EZ(t->recover * 1000, T);
2880 q->cycle = EZ(t->cycle * 1000, T);
2881 q->udma = EZ(t->udma * 1000, UT);
2882 }
2883
2884 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2885 struct ata_timing *m, unsigned int what)
2886 {
2887 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2888 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2889 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2890 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2891 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2892 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2893 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2894 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2895 }
2896
2897 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2898 {
2899 const struct ata_timing *t = ata_timing;
2900
2901 while (xfer_mode > t->mode)
2902 t++;
2903
2904 if (xfer_mode == t->mode)
2905 return t;
2906 return NULL;
2907 }
2908
2909 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2910 struct ata_timing *t, int T, int UT)
2911 {
2912 const struct ata_timing *s;
2913 struct ata_timing p;
2914
2915 /*
2916 * Find the mode.
2917 */
2918
2919 if (!(s = ata_timing_find_mode(speed)))
2920 return -EINVAL;
2921
2922 memcpy(t, s, sizeof(*s));
2923
2924 /*
2925 * If the drive is an EIDE drive, it can tell us it needs extended
2926 * PIO/MW_DMA cycle timing.
2927 */
2928
2929 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2930 memset(&p, 0, sizeof(p));
2931 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2932 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2933 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2934 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2935 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2936 }
2937 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2938 }
2939
2940 /*
2941 * Convert the timing to bus clock counts.
2942 */
2943
2944 ata_timing_quantize(t, t, T, UT);
2945
2946 /*
2947 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2948 * S.M.A.R.T * and some other commands. We have to ensure that the
2949 * DMA cycle timing is slower/equal than the fastest PIO timing.
2950 */
2951
2952 if (speed > XFER_PIO_6) {
2953 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2954 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2955 }
2956
2957 /*
2958 * Lengthen active & recovery time so that cycle time is correct.
2959 */
2960
2961 if (t->act8b + t->rec8b < t->cyc8b) {
2962 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2963 t->rec8b = t->cyc8b - t->act8b;
2964 }
2965
2966 if (t->active + t->recover < t->cycle) {
2967 t->active += (t->cycle - (t->active + t->recover)) / 2;
2968 t->recover = t->cycle - t->active;
2969 }
2970
2971 /* In a few cases quantisation may produce enough errors to
2972 leave t->cycle too low for the sum of active and recovery
2973 if so we must correct this */
2974 if (t->active + t->recover > t->cycle)
2975 t->cycle = t->active + t->recover;
2976
2977 return 0;
2978 }
2979
2980 /**
2981 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2982 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2983 * @cycle: cycle duration in ns
2984 *
2985 * Return matching xfer mode for @cycle. The returned mode is of
2986 * the transfer type specified by @xfer_shift. If @cycle is too
2987 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
2988 * than the fastest known mode, the fasted mode is returned.
2989 *
2990 * LOCKING:
2991 * None.
2992 *
2993 * RETURNS:
2994 * Matching xfer_mode, 0xff if no match found.
2995 */
2996 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2997 {
2998 u8 base_mode = 0xff, last_mode = 0xff;
2999 const struct ata_xfer_ent *ent;
3000 const struct ata_timing *t;
3001
3002 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3003 if (ent->shift == xfer_shift)
3004 base_mode = ent->base;
3005
3006 for (t = ata_timing_find_mode(base_mode);
3007 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3008 unsigned short this_cycle;
3009
3010 switch (xfer_shift) {
3011 case ATA_SHIFT_PIO:
3012 case ATA_SHIFT_MWDMA:
3013 this_cycle = t->cycle;
3014 break;
3015 case ATA_SHIFT_UDMA:
3016 this_cycle = t->udma;
3017 break;
3018 default:
3019 return 0xff;
3020 }
3021
3022 if (cycle > this_cycle)
3023 break;
3024
3025 last_mode = t->mode;
3026 }
3027
3028 return last_mode;
3029 }
3030
3031 /**
3032 * ata_down_xfermask_limit - adjust dev xfer masks downward
3033 * @dev: Device to adjust xfer masks
3034 * @sel: ATA_DNXFER_* selector
3035 *
3036 * Adjust xfer masks of @dev downward. Note that this function
3037 * does not apply the change. Invoking ata_set_mode() afterwards
3038 * will apply the limit.
3039 *
3040 * LOCKING:
3041 * Inherited from caller.
3042 *
3043 * RETURNS:
3044 * 0 on success, negative errno on failure
3045 */
3046 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3047 {
3048 char buf[32];
3049 unsigned long orig_mask, xfer_mask;
3050 unsigned long pio_mask, mwdma_mask, udma_mask;
3051 int quiet, highbit;
3052
3053 quiet = !!(sel & ATA_DNXFER_QUIET);
3054 sel &= ~ATA_DNXFER_QUIET;
3055
3056 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3057 dev->mwdma_mask,
3058 dev->udma_mask);
3059 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3060
3061 switch (sel) {
3062 case ATA_DNXFER_PIO:
3063 highbit = fls(pio_mask) - 1;
3064 pio_mask &= ~(1 << highbit);
3065 break;
3066
3067 case ATA_DNXFER_DMA:
3068 if (udma_mask) {
3069 highbit = fls(udma_mask) - 1;
3070 udma_mask &= ~(1 << highbit);
3071 if (!udma_mask)
3072 return -ENOENT;
3073 } else if (mwdma_mask) {
3074 highbit = fls(mwdma_mask) - 1;
3075 mwdma_mask &= ~(1 << highbit);
3076 if (!mwdma_mask)
3077 return -ENOENT;
3078 }
3079 break;
3080
3081 case ATA_DNXFER_40C:
3082 udma_mask &= ATA_UDMA_MASK_40C;
3083 break;
3084
3085 case ATA_DNXFER_FORCE_PIO0:
3086 pio_mask &= 1;
3087 case ATA_DNXFER_FORCE_PIO:
3088 mwdma_mask = 0;
3089 udma_mask = 0;
3090 break;
3091
3092 default:
3093 BUG();
3094 }
3095
3096 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3097
3098 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3099 return -ENOENT;
3100
3101 if (!quiet) {
3102 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3103 snprintf(buf, sizeof(buf), "%s:%s",
3104 ata_mode_string(xfer_mask),
3105 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3106 else
3107 snprintf(buf, sizeof(buf), "%s",
3108 ata_mode_string(xfer_mask));
3109
3110 ata_dev_printk(dev, KERN_WARNING,
3111 "limiting speed to %s\n", buf);
3112 }
3113
3114 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3115 &dev->udma_mask);
3116
3117 return 0;
3118 }
3119
3120 static int ata_dev_set_mode(struct ata_device *dev)
3121 {
3122 struct ata_eh_context *ehc = &dev->link->eh_context;
3123 const char *dev_err_whine = "";
3124 int ign_dev_err = 0;
3125 unsigned int err_mask;
3126 int rc;
3127
3128 dev->flags &= ~ATA_DFLAG_PIO;
3129 if (dev->xfer_shift == ATA_SHIFT_PIO)
3130 dev->flags |= ATA_DFLAG_PIO;
3131
3132 err_mask = ata_dev_set_xfermode(dev);
3133
3134 if (err_mask & ~AC_ERR_DEV)
3135 goto fail;
3136
3137 /* revalidate */
3138 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3139 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3140 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3141 if (rc)
3142 return rc;
3143
3144 /* Old CFA may refuse this command, which is just fine */
3145 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3146 ign_dev_err = 1;
3147
3148 /* Some very old devices and some bad newer ones fail any kind of
3149 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3150 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3151 dev->pio_mode <= XFER_PIO_2)
3152 ign_dev_err = 1;
3153
3154 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3155 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3156 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3157 dev->dma_mode == XFER_MW_DMA_0 &&
3158 (dev->id[63] >> 8) & 1)
3159 ign_dev_err = 1;
3160
3161 /* if the device is actually configured correctly, ignore dev err */
3162 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3163 ign_dev_err = 1;
3164
3165 if (err_mask & AC_ERR_DEV) {
3166 if (!ign_dev_err)
3167 goto fail;
3168 else
3169 dev_err_whine = " (device error ignored)";
3170 }
3171
3172 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3173 dev->xfer_shift, (int)dev->xfer_mode);
3174
3175 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3176 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3177 dev_err_whine);
3178
3179 return 0;
3180
3181 fail:
3182 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3183 "(err_mask=0x%x)\n", err_mask);
3184 return -EIO;
3185 }
3186
3187 /**
3188 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3189 * @link: link on which timings will be programmed
3190 * @r_failed_dev: out parameter for failed device
3191 *
3192 * Standard implementation of the function used to tune and set
3193 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3194 * ata_dev_set_mode() fails, pointer to the failing device is
3195 * returned in @r_failed_dev.
3196 *
3197 * LOCKING:
3198 * PCI/etc. bus probe sem.
3199 *
3200 * RETURNS:
3201 * 0 on success, negative errno otherwise
3202 */
3203
3204 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3205 {
3206 struct ata_port *ap = link->ap;
3207 struct ata_device *dev;
3208 int rc = 0, used_dma = 0, found = 0;
3209
3210 /* step 1: calculate xfer_mask */
3211 ata_link_for_each_dev(dev, link) {
3212 unsigned long pio_mask, dma_mask;
3213 unsigned int mode_mask;
3214
3215 if (!ata_dev_enabled(dev))
3216 continue;
3217
3218 mode_mask = ATA_DMA_MASK_ATA;
3219 if (dev->class == ATA_DEV_ATAPI)
3220 mode_mask = ATA_DMA_MASK_ATAPI;
3221 else if (ata_id_is_cfa(dev->id))
3222 mode_mask = ATA_DMA_MASK_CFA;
3223
3224 ata_dev_xfermask(dev);
3225 ata_force_xfermask(dev);
3226
3227 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3228 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3229
3230 if (libata_dma_mask & mode_mask)
3231 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3232 else
3233 dma_mask = 0;
3234
3235 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3236 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3237
3238 found = 1;
3239 if (dev->dma_mode != 0xff)
3240 used_dma = 1;
3241 }
3242 if (!found)
3243 goto out;
3244
3245 /* step 2: always set host PIO timings */
3246 ata_link_for_each_dev(dev, link) {
3247 if (!ata_dev_enabled(dev))
3248 continue;
3249
3250 if (dev->pio_mode == 0xff) {
3251 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3252 rc = -EINVAL;
3253 goto out;
3254 }
3255
3256 dev->xfer_mode = dev->pio_mode;
3257 dev->xfer_shift = ATA_SHIFT_PIO;
3258 if (ap->ops->set_piomode)
3259 ap->ops->set_piomode(ap, dev);
3260 }
3261
3262 /* step 3: set host DMA timings */
3263 ata_link_for_each_dev(dev, link) {
3264 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3265 continue;
3266
3267 dev->xfer_mode = dev->dma_mode;
3268 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3269 if (ap->ops->set_dmamode)
3270 ap->ops->set_dmamode(ap, dev);
3271 }
3272
3273 /* step 4: update devices' xfer mode */
3274 ata_link_for_each_dev(dev, link) {
3275 /* don't update suspended devices' xfer mode */
3276 if (!ata_dev_enabled(dev))
3277 continue;
3278
3279 rc = ata_dev_set_mode(dev);
3280 if (rc)
3281 goto out;
3282 }
3283
3284 /* Record simplex status. If we selected DMA then the other
3285 * host channels are not permitted to do so.
3286 */
3287 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3288 ap->host->simplex_claimed = ap;
3289
3290 out:
3291 if (rc)
3292 *r_failed_dev = dev;
3293 return rc;
3294 }
3295
3296 /**
3297 * ata_wait_ready - wait for link to become ready
3298 * @link: link to be waited on
3299 * @deadline: deadline jiffies for the operation
3300 * @check_ready: callback to check link readiness
3301 *
3302 * Wait for @link to become ready. @check_ready should return
3303 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3304 * link doesn't seem to be occupied, other errno for other error
3305 * conditions.
3306 *
3307 * Transient -ENODEV conditions are allowed for
3308 * ATA_TMOUT_FF_WAIT.
3309 *
3310 * LOCKING:
3311 * EH context.
3312 *
3313 * RETURNS:
3314 * 0 if @linke is ready before @deadline; otherwise, -errno.
3315 */
3316 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3317 int (*check_ready)(struct ata_link *link))
3318 {
3319 unsigned long start = jiffies;
3320 unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
3321 int warned = 0;
3322
3323 if (time_after(nodev_deadline, deadline))
3324 nodev_deadline = deadline;
3325
3326 while (1) {
3327 unsigned long now = jiffies;
3328 int ready, tmp;
3329
3330 ready = tmp = check_ready(link);
3331 if (ready > 0)
3332 return 0;
3333
3334 /* -ENODEV could be transient. Ignore -ENODEV if link
3335 * is online. Also, some SATA devices take a long
3336 * time to clear 0xff after reset. For example,
3337 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3338 * GoVault needs even more than that. Wait for
3339 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3340 *
3341 * Note that some PATA controllers (pata_ali) explode
3342 * if status register is read more than once when
3343 * there's no device attached.
3344 */
3345 if (ready == -ENODEV) {
3346 if (ata_link_online(link))
3347 ready = 0;
3348 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3349 !ata_link_offline(link) &&
3350 time_before(now, nodev_deadline))
3351 ready = 0;
3352 }
3353
3354 if (ready)
3355 return ready;
3356 if (time_after(now, deadline))
3357 return -EBUSY;
3358
3359 if (!warned && time_after(now, start + 5 * HZ) &&
3360 (deadline - now > 3 * HZ)) {
3361 ata_link_printk(link, KERN_WARNING,
3362 "link is slow to respond, please be patient "
3363 "(ready=%d)\n", tmp);
3364 warned = 1;
3365 }
3366
3367 msleep(50);
3368 }
3369 }
3370
3371 /**
3372 * ata_wait_after_reset - wait for link to become ready after reset
3373 * @link: link to be waited on
3374 * @deadline: deadline jiffies for the operation
3375 * @check_ready: callback to check link readiness
3376 *
3377 * Wait for @link to become ready after reset.
3378 *
3379 * LOCKING:
3380 * EH context.
3381 *
3382 * RETURNS:
3383 * 0 if @linke is ready before @deadline; otherwise, -errno.
3384 */
3385 extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3386 int (*check_ready)(struct ata_link *link))
3387 {
3388 msleep(ATA_WAIT_AFTER_RESET_MSECS);
3389
3390 return ata_wait_ready(link, deadline, check_ready);
3391 }
3392
3393 /**
3394 * sata_link_debounce - debounce SATA phy status
3395 * @link: ATA link to debounce SATA phy status for
3396 * @params: timing parameters { interval, duratinon, timeout } in msec
3397 * @deadline: deadline jiffies for the operation
3398 *
3399 * Make sure SStatus of @link reaches stable state, determined by
3400 * holding the same value where DET is not 1 for @duration polled
3401 * every @interval, before @timeout. Timeout constraints the
3402 * beginning of the stable state. Because DET gets stuck at 1 on
3403 * some controllers after hot unplugging, this functions waits
3404 * until timeout then returns 0 if DET is stable at 1.
3405 *
3406 * @timeout is further limited by @deadline. The sooner of the
3407 * two is used.
3408 *
3409 * LOCKING:
3410 * Kernel thread context (may sleep)
3411 *
3412 * RETURNS:
3413 * 0 on success, -errno on failure.
3414 */
3415 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3416 unsigned long deadline)
3417 {
3418 unsigned long interval_msec = params[0];
3419 unsigned long duration = msecs_to_jiffies(params[1]);
3420 unsigned long last_jiffies, t;
3421 u32 last, cur;
3422 int rc;
3423
3424 t = jiffies + msecs_to_jiffies(params[2]);
3425 if (time_before(t, deadline))
3426 deadline = t;
3427
3428 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3429 return rc;
3430 cur &= 0xf;
3431
3432 last = cur;
3433 last_jiffies = jiffies;
3434
3435 while (1) {
3436 msleep(interval_msec);
3437 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3438 return rc;
3439 cur &= 0xf;
3440
3441 /* DET stable? */
3442 if (cur == last) {
3443 if (cur == 1 && time_before(jiffies, deadline))
3444 continue;
3445 if (time_after(jiffies, last_jiffies + duration))
3446 return 0;
3447 continue;
3448 }
3449
3450 /* unstable, start over */
3451 last = cur;
3452 last_jiffies = jiffies;
3453
3454 /* Check deadline. If debouncing failed, return
3455 * -EPIPE to tell upper layer to lower link speed.
3456 */
3457 if (time_after(jiffies, deadline))
3458 return -EPIPE;
3459 }
3460 }
3461
3462 /**
3463 * sata_link_resume - resume SATA link
3464 * @link: ATA link to resume SATA
3465 * @params: timing parameters { interval, duratinon, timeout } in msec
3466 * @deadline: deadline jiffies for the operation
3467 *
3468 * Resume SATA phy @link and debounce it.
3469 *
3470 * LOCKING:
3471 * Kernel thread context (may sleep)
3472 *
3473 * RETURNS:
3474 * 0 on success, -errno on failure.
3475 */
3476 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3477 unsigned long deadline)
3478 {
3479 u32 scontrol, serror;
3480 int rc;
3481
3482 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3483 return rc;
3484
3485 scontrol = (scontrol & 0x0f0) | 0x300;
3486
3487 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3488 return rc;
3489
3490 /* Some PHYs react badly if SStatus is pounded immediately
3491 * after resuming. Delay 200ms before debouncing.
3492 */
3493 msleep(200);
3494
3495 if ((rc = sata_link_debounce(link, params, deadline)))
3496 return rc;
3497
3498 /* Clear SError. PMP and some host PHYs require this to
3499 * operate and clearing should be done before checking PHY
3500 * online status to avoid race condition (hotplugging between
3501 * link resume and status check).
3502 */
3503 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3504 rc = sata_scr_write(link, SCR_ERROR, serror);
3505 if (rc == 0 || rc == -EINVAL) {
3506 unsigned long flags;
3507
3508 spin_lock_irqsave(link->ap->lock, flags);
3509 link->eh_info.serror = 0;
3510 spin_unlock_irqrestore(link->ap->lock, flags);
3511 rc = 0;
3512 }
3513 return rc;
3514 }
3515
3516 /**
3517 * ata_std_prereset - prepare for reset
3518 * @link: ATA link to be reset
3519 * @deadline: deadline jiffies for the operation
3520 *
3521 * @link is about to be reset. Initialize it. Failure from
3522 * prereset makes libata abort whole reset sequence and give up
3523 * that port, so prereset should be best-effort. It does its
3524 * best to prepare for reset sequence but if things go wrong, it
3525 * should just whine, not fail.
3526 *
3527 * LOCKING:
3528 * Kernel thread context (may sleep)
3529 *
3530 * RETURNS:
3531 * 0 on success, -errno otherwise.
3532 */
3533 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3534 {
3535 struct ata_port *ap = link->ap;
3536 struct ata_eh_context *ehc = &link->eh_context;
3537 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3538 int rc;
3539
3540 /* if we're about to do hardreset, nothing more to do */
3541 if (ehc->i.action & ATA_EH_HARDRESET)
3542 return 0;
3543
3544 /* if SATA, resume link */
3545 if (ap->flags & ATA_FLAG_SATA) {
3546 rc = sata_link_resume(link, timing, deadline);
3547 /* whine about phy resume failure but proceed */
3548 if (rc && rc != -EOPNOTSUPP)
3549 ata_link_printk(link, KERN_WARNING, "failed to resume "
3550 "link for reset (errno=%d)\n", rc);
3551 }
3552
3553 return 0;
3554 }
3555
3556 /**
3557 * sata_link_hardreset - reset link via SATA phy reset
3558 * @link: link to reset
3559 * @timing: timing parameters { interval, duratinon, timeout } in msec
3560 * @deadline: deadline jiffies for the operation
3561 * @online: optional out parameter indicating link onlineness
3562 * @check_ready: optional callback to check link readiness
3563 *
3564 * SATA phy-reset @link using DET bits of SControl register.
3565 * After hardreset, link readiness is waited upon using
3566 * ata_wait_ready() if @check_ready is specified. LLDs are
3567 * allowed to not specify @check_ready and wait itself after this
3568 * function returns. Device classification is LLD's
3569 * responsibility.
3570 *
3571 * *@online is set to one iff reset succeeded and @link is online
3572 * after reset.
3573 *
3574 * LOCKING:
3575 * Kernel thread context (may sleep)
3576 *
3577 * RETURNS:
3578 * 0 on success, -errno otherwise.
3579 */
3580 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3581 unsigned long deadline,
3582 bool *online, int (*check_ready)(struct ata_link *))
3583 {
3584 u32 scontrol;
3585 int rc;
3586
3587 DPRINTK("ENTER\n");
3588
3589 if (online)
3590 *online = false;
3591
3592 if (sata_set_spd_needed(link)) {
3593 /* SATA spec says nothing about how to reconfigure
3594 * spd. To be on the safe side, turn off phy during
3595 * reconfiguration. This works for at least ICH7 AHCI
3596 * and Sil3124.
3597 */
3598 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3599 goto out;
3600
3601 scontrol = (scontrol & 0x0f0) | 0x304;
3602
3603 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3604 goto out;
3605
3606 sata_set_spd(link);
3607 }
3608
3609 /* issue phy wake/reset */
3610 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3611 goto out;
3612
3613 scontrol = (scontrol & 0x0f0) | 0x301;
3614
3615 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3616 goto out;
3617
3618 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3619 * 10.4.2 says at least 1 ms.
3620 */
3621 msleep(1);
3622
3623 /* bring link back */
3624 rc = sata_link_resume(link, timing, deadline);
3625 if (rc)
3626 goto out;
3627 /* if link is offline nothing more to do */
3628 if (ata_link_offline(link))
3629 goto out;
3630
3631 /* Link is online. From this point, -ENODEV too is an error. */
3632 if (online)
3633 *online = true;
3634
3635 if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link)) {
3636 /* If PMP is supported, we have to do follow-up SRST.
3637 * Some PMPs don't send D2H Reg FIS after hardreset if
3638 * the first port is empty. Wait only for
3639 * ATA_TMOUT_PMP_SRST_WAIT.
3640 */
3641 if (check_ready) {
3642 unsigned long pmp_deadline;
3643
3644 pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
3645 if (time_after(pmp_deadline, deadline))
3646 pmp_deadline = deadline;
3647 ata_wait_ready(link, pmp_deadline, check_ready);
3648 }
3649 rc = -EAGAIN;
3650 goto out;
3651 }
3652
3653 rc = 0;
3654 if (check_ready)
3655 rc = ata_wait_ready(link, deadline, check_ready);
3656 out:
3657 if (rc && rc != -EAGAIN)
3658 ata_link_printk(link, KERN_ERR,
3659 "COMRESET failed (errno=%d)\n", rc);
3660 DPRINTK("EXIT, rc=%d\n", rc);
3661 return rc;
3662 }
3663
3664 /**
3665 * sata_std_hardreset - COMRESET w/o waiting or classification
3666 * @link: link to reset
3667 * @class: resulting class of attached device
3668 * @deadline: deadline jiffies for the operation
3669 *
3670 * Standard SATA COMRESET w/o waiting or classification.
3671 *
3672 * LOCKING:
3673 * Kernel thread context (may sleep)
3674 *
3675 * RETURNS:
3676 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3677 */
3678 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3679 unsigned long deadline)
3680 {
3681 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3682 bool online;
3683 int rc;
3684
3685 /* do hardreset */
3686 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3687 return online ? -EAGAIN : rc;
3688 }
3689
3690 /**
3691 * ata_std_postreset - standard postreset callback
3692 * @link: the target ata_link
3693 * @classes: classes of attached devices
3694 *
3695 * This function is invoked after a successful reset. Note that
3696 * the device might have been reset more than once using
3697 * different reset methods before postreset is invoked.
3698 *
3699 * LOCKING:
3700 * Kernel thread context (may sleep)
3701 */
3702 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3703 {
3704 DPRINTK("ENTER\n");
3705
3706 /* print link status */
3707 sata_print_link_status(link);
3708
3709 DPRINTK("EXIT\n");
3710 }
3711
3712 /**
3713 * ata_dev_same_device - Determine whether new ID matches configured device
3714 * @dev: device to compare against
3715 * @new_class: class of the new device
3716 * @new_id: IDENTIFY page of the new device
3717 *
3718 * Compare @new_class and @new_id against @dev and determine
3719 * whether @dev is the device indicated by @new_class and
3720 * @new_id.
3721 *
3722 * LOCKING:
3723 * None.
3724 *
3725 * RETURNS:
3726 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3727 */
3728 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3729 const u16 *new_id)
3730 {
3731 const u16 *old_id = dev->id;
3732 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3733 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3734
3735 if (dev->class != new_class) {
3736 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3737 dev->class, new_class);
3738 return 0;
3739 }
3740
3741 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3742 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3743 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3744 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3745
3746 if (strcmp(model[0], model[1])) {
3747 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3748 "'%s' != '%s'\n", model[0], model[1]);
3749 return 0;
3750 }
3751
3752 if (strcmp(serial[0], serial[1])) {
3753 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3754 "'%s' != '%s'\n", serial[0], serial[1]);
3755 return 0;
3756 }
3757
3758 return 1;
3759 }
3760
3761 /**
3762 * ata_dev_reread_id - Re-read IDENTIFY data
3763 * @dev: target ATA device
3764 * @readid_flags: read ID flags
3765 *
3766 * Re-read IDENTIFY page and make sure @dev is still attached to
3767 * the port.
3768 *
3769 * LOCKING:
3770 * Kernel thread context (may sleep)
3771 *
3772 * RETURNS:
3773 * 0 on success, negative errno otherwise
3774 */
3775 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3776 {
3777 unsigned int class = dev->class;
3778 u16 *id = (void *)dev->link->ap->sector_buf;
3779 int rc;
3780
3781 /* read ID data */
3782 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3783 if (rc)
3784 return rc;
3785
3786 /* is the device still there? */
3787 if (!ata_dev_same_device(dev, class, id))
3788 return -ENODEV;
3789
3790 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3791 return 0;
3792 }
3793
3794 /**
3795 * ata_dev_revalidate - Revalidate ATA device
3796 * @dev: device to revalidate
3797 * @new_class: new class code
3798 * @readid_flags: read ID flags
3799 *
3800 * Re-read IDENTIFY page, make sure @dev is still attached to the
3801 * port and reconfigure it according to the new IDENTIFY page.
3802 *
3803 * LOCKING:
3804 * Kernel thread context (may sleep)
3805 *
3806 * RETURNS:
3807 * 0 on success, negative errno otherwise
3808 */
3809 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3810 unsigned int readid_flags)
3811 {
3812 u64 n_sectors = dev->n_sectors;
3813 int rc;
3814
3815 if (!ata_dev_enabled(dev))
3816 return -ENODEV;
3817
3818 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3819 if (ata_class_enabled(new_class) &&
3820 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3821 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3822 dev->class, new_class);
3823 rc = -ENODEV;
3824 goto fail;
3825 }
3826
3827 /* re-read ID */
3828 rc = ata_dev_reread_id(dev, readid_flags);
3829 if (rc)
3830 goto fail;
3831
3832 /* configure device according to the new ID */
3833 rc = ata_dev_configure(dev);
3834 if (rc)
3835 goto fail;
3836
3837 /* verify n_sectors hasn't changed */
3838 if (dev->class == ATA_DEV_ATA && n_sectors &&
3839 dev->n_sectors != n_sectors) {
3840 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3841 "%llu != %llu\n",
3842 (unsigned long long)n_sectors,
3843 (unsigned long long)dev->n_sectors);
3844
3845 /* restore original n_sectors */
3846 dev->n_sectors = n_sectors;
3847
3848 rc = -ENODEV;
3849 goto fail;
3850 }
3851
3852 return 0;
3853
3854 fail:
3855 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3856 return rc;
3857 }
3858
3859 struct ata_blacklist_entry {
3860 const char *model_num;
3861 const char *model_rev;
3862 unsigned long horkage;
3863 };
3864
3865 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3866 /* Devices with DMA related problems under Linux */
3867 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3868 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3869 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3870 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3871 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3872 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3873 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3874 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3875 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3876 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3877 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3878 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3879 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3880 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3881 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3882 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3883 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3884 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3885 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3886 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3887 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3888 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3889 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3890 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3891 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3892 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3893 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3894 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3895 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3896 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3897 /* Odd clown on sil3726/4726 PMPs */
3898 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3899 ATA_HORKAGE_SKIP_PM },
3900
3901 /* Weird ATAPI devices */
3902 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3903
3904 /* Devices we expect to fail diagnostics */
3905
3906 /* Devices where NCQ should be avoided */
3907 /* NCQ is slow */
3908 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3909 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3910 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3911 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3912 /* NCQ is broken */
3913 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3914 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3915 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3916 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3917
3918 /* Blacklist entries taken from Silicon Image 3124/3132
3919 Windows driver .inf file - also several Linux problem reports */
3920 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3921 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3922 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3923
3924 /* devices which puke on READ_NATIVE_MAX */
3925 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3926 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3927 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3928 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3929
3930 /* Devices which report 1 sector over size HPA */
3931 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3932 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3933 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
3934
3935 /* Devices which get the IVB wrong */
3936 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3937 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
3938 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
3939 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
3940 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
3941
3942 /* End Marker */
3943 { }
3944 };
3945
3946 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3947 {
3948 const char *p;
3949 int len;
3950
3951 /*
3952 * check for trailing wildcard: *\0
3953 */
3954 p = strchr(patt, wildchar);
3955 if (p && ((*(p + 1)) == 0))
3956 len = p - patt;
3957 else {
3958 len = strlen(name);
3959 if (!len) {
3960 if (!*patt)
3961 return 0;
3962 return -1;
3963 }
3964 }
3965
3966 return strncmp(patt, name, len);
3967 }
3968
3969 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3970 {
3971 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3972 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3973 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3974
3975 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3976 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3977
3978 while (ad->model_num) {
3979 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3980 if (ad->model_rev == NULL)
3981 return ad->horkage;
3982 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3983 return ad->horkage;
3984 }
3985 ad++;
3986 }
3987 return 0;
3988 }
3989
3990 static int ata_dma_blacklisted(const struct ata_device *dev)
3991 {
3992 /* We don't support polling DMA.
3993 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3994 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3995 */
3996 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3997 (dev->flags & ATA_DFLAG_CDB_INTR))
3998 return 1;
3999 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4000 }
4001
4002 /**
4003 * ata_is_40wire - check drive side detection
4004 * @dev: device
4005 *
4006 * Perform drive side detection decoding, allowing for device vendors
4007 * who can't follow the documentation.
4008 */
4009
4010 static int ata_is_40wire(struct ata_device *dev)
4011 {
4012 if (dev->horkage & ATA_HORKAGE_IVB)
4013 return ata_drive_40wire_relaxed(dev->id);
4014 return ata_drive_40wire(dev->id);
4015 }
4016
4017 /**
4018 * cable_is_40wire - 40/80/SATA decider
4019 * @ap: port to consider
4020 *
4021 * This function encapsulates the policy for speed management
4022 * in one place. At the moment we don't cache the result but
4023 * there is a good case for setting ap->cbl to the result when
4024 * we are called with unknown cables (and figuring out if it
4025 * impacts hotplug at all).
4026 *
4027 * Return 1 if the cable appears to be 40 wire.
4028 */
4029
4030 static int cable_is_40wire(struct ata_port *ap)
4031 {
4032 struct ata_link *link;
4033 struct ata_device *dev;
4034
4035 /* If the controller thinks we are 40 wire, we are */
4036 if (ap->cbl == ATA_CBL_PATA40)
4037 return 1;
4038 /* If the controller thinks we are 80 wire, we are */
4039 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4040 return 0;
4041 /* If the controller doesn't know we scan
4042
4043 - Note: We look for all 40 wire detects at this point.
4044 Any 80 wire detect is taken to be 80 wire cable
4045 because
4046 - In many setups only the one drive (slave if present)
4047 will give a valid detect
4048 - If you have a non detect capable drive you don't
4049 want it to colour the choice
4050 */
4051 ata_port_for_each_link(link, ap) {
4052 ata_link_for_each_dev(dev, link) {
4053 if (!ata_is_40wire(dev))
4054 return 0;
4055 }
4056 }
4057 return 1;
4058 }
4059
4060 /**
4061 * ata_dev_xfermask - Compute supported xfermask of the given device
4062 * @dev: Device to compute xfermask for
4063 *
4064 * Compute supported xfermask of @dev and store it in
4065 * dev->*_mask. This function is responsible for applying all
4066 * known limits including host controller limits, device
4067 * blacklist, etc...
4068 *
4069 * LOCKING:
4070 * None.
4071 */
4072 static void ata_dev_xfermask(struct ata_device *dev)
4073 {
4074 struct ata_link *link = dev->link;
4075 struct ata_port *ap = link->ap;
4076 struct ata_host *host = ap->host;
4077 unsigned long xfer_mask;
4078
4079 /* controller modes available */
4080 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4081 ap->mwdma_mask, ap->udma_mask);
4082
4083 /* drive modes available */
4084 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4085 dev->mwdma_mask, dev->udma_mask);
4086 xfer_mask &= ata_id_xfermask(dev->id);
4087
4088 /*
4089 * CFA Advanced TrueIDE timings are not allowed on a shared
4090 * cable
4091 */
4092 if (ata_dev_pair(dev)) {
4093 /* No PIO5 or PIO6 */
4094 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4095 /* No MWDMA3 or MWDMA 4 */
4096 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4097 }
4098
4099 if (ata_dma_blacklisted(dev)) {
4100 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4101 ata_dev_printk(dev, KERN_WARNING,
4102 "device is on DMA blacklist, disabling DMA\n");
4103 }
4104
4105 if ((host->flags & ATA_HOST_SIMPLEX) &&
4106 host->simplex_claimed && host->simplex_claimed != ap) {
4107 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4108 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4109 "other device, disabling DMA\n");
4110 }
4111
4112 if (ap->flags & ATA_FLAG_NO_IORDY)
4113 xfer_mask &= ata_pio_mask_no_iordy(dev);
4114
4115 if (ap->ops->mode_filter)
4116 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4117
4118 /* Apply cable rule here. Don't apply it early because when
4119 * we handle hot plug the cable type can itself change.
4120 * Check this last so that we know if the transfer rate was
4121 * solely limited by the cable.
4122 * Unknown or 80 wire cables reported host side are checked
4123 * drive side as well. Cases where we know a 40wire cable
4124 * is used safely for 80 are not checked here.
4125 */
4126 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4127 /* UDMA/44 or higher would be available */
4128 if (cable_is_40wire(ap)) {
4129 ata_dev_printk(dev, KERN_WARNING,
4130 "limited to UDMA/33 due to 40-wire cable\n");
4131 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4132 }
4133
4134 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4135 &dev->mwdma_mask, &dev->udma_mask);
4136 }
4137
4138 /**
4139 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4140 * @dev: Device to which command will be sent
4141 *
4142 * Issue SET FEATURES - XFER MODE command to device @dev
4143 * on port @ap.
4144 *
4145 * LOCKING:
4146 * PCI/etc. bus probe sem.
4147 *
4148 * RETURNS:
4149 * 0 on success, AC_ERR_* mask otherwise.
4150 */
4151
4152 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4153 {
4154 struct ata_taskfile tf;
4155 unsigned int err_mask;
4156
4157 /* set up set-features taskfile */
4158 DPRINTK("set features - xfer mode\n");
4159
4160 /* Some controllers and ATAPI devices show flaky interrupt
4161 * behavior after setting xfer mode. Use polling instead.
4162 */
4163 ata_tf_init(dev, &tf);
4164 tf.command = ATA_CMD_SET_FEATURES;
4165 tf.feature = SETFEATURES_XFER;
4166 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4167 tf.protocol = ATA_PROT_NODATA;
4168 /* If we are using IORDY we must send the mode setting command */
4169 if (ata_pio_need_iordy(dev))
4170 tf.nsect = dev->xfer_mode;
4171 /* If the device has IORDY and the controller does not - turn it off */
4172 else if (ata_id_has_iordy(dev->id))
4173 tf.nsect = 0x01;
4174 else /* In the ancient relic department - skip all of this */
4175 return 0;
4176
4177 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4178
4179 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4180 return err_mask;
4181 }
4182 /**
4183 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4184 * @dev: Device to which command will be sent
4185 * @enable: Whether to enable or disable the feature
4186 * @feature: The sector count represents the feature to set
4187 *
4188 * Issue SET FEATURES - SATA FEATURES command to device @dev
4189 * on port @ap with sector count
4190 *
4191 * LOCKING:
4192 * PCI/etc. bus probe sem.
4193 *
4194 * RETURNS:
4195 * 0 on success, AC_ERR_* mask otherwise.
4196 */
4197 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4198 u8 feature)
4199 {
4200 struct ata_taskfile tf;
4201 unsigned int err_mask;
4202
4203 /* set up set-features taskfile */
4204 DPRINTK("set features - SATA features\n");
4205
4206 ata_tf_init(dev, &tf);
4207 tf.command = ATA_CMD_SET_FEATURES;
4208 tf.feature = enable;
4209 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4210 tf.protocol = ATA_PROT_NODATA;
4211 tf.nsect = feature;
4212
4213 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4214
4215 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4216 return err_mask;
4217 }
4218
4219 /**
4220 * ata_dev_init_params - Issue INIT DEV PARAMS command
4221 * @dev: Device to which command will be sent
4222 * @heads: Number of heads (taskfile parameter)
4223 * @sectors: Number of sectors (taskfile parameter)
4224 *
4225 * LOCKING:
4226 * Kernel thread context (may sleep)
4227 *
4228 * RETURNS:
4229 * 0 on success, AC_ERR_* mask otherwise.
4230 */
4231 static unsigned int ata_dev_init_params(struct ata_device *dev,
4232 u16 heads, u16 sectors)
4233 {
4234 struct ata_taskfile tf;
4235 unsigned int err_mask;
4236
4237 /* Number of sectors per track 1-255. Number of heads 1-16 */
4238 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4239 return AC_ERR_INVALID;
4240
4241 /* set up init dev params taskfile */
4242 DPRINTK("init dev params \n");
4243
4244 ata_tf_init(dev, &tf);
4245 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4246 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4247 tf.protocol = ATA_PROT_NODATA;
4248 tf.nsect = sectors;
4249 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4250
4251 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4252 /* A clean abort indicates an original or just out of spec drive
4253 and we should continue as we issue the setup based on the
4254 drive reported working geometry */
4255 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4256 err_mask = 0;
4257
4258 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4259 return err_mask;
4260 }
4261
4262 /**
4263 * ata_sg_clean - Unmap DMA memory associated with command
4264 * @qc: Command containing DMA memory to be released
4265 *
4266 * Unmap all mapped DMA memory associated with this command.
4267 *
4268 * LOCKING:
4269 * spin_lock_irqsave(host lock)
4270 */
4271 void ata_sg_clean(struct ata_queued_cmd *qc)
4272 {
4273 struct ata_port *ap = qc->ap;
4274 struct scatterlist *sg = qc->sg;
4275 int dir = qc->dma_dir;
4276
4277 WARN_ON(sg == NULL);
4278
4279 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4280
4281 if (qc->n_elem)
4282 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4283
4284 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4285 qc->sg = NULL;
4286 }
4287
4288 /**
4289 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4290 * @qc: Metadata associated with taskfile to check
4291 *
4292 * Allow low-level driver to filter ATA PACKET commands, returning
4293 * a status indicating whether or not it is OK to use DMA for the
4294 * supplied PACKET command.
4295 *
4296 * LOCKING:
4297 * spin_lock_irqsave(host lock)
4298 *
4299 * RETURNS: 0 when ATAPI DMA can be used
4300 * nonzero otherwise
4301 */
4302 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4303 {
4304 struct ata_port *ap = qc->ap;
4305
4306 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4307 * few ATAPI devices choke on such DMA requests.
4308 */
4309 if (unlikely(qc->nbytes & 15))
4310 return 1;
4311
4312 if (ap->ops->check_atapi_dma)
4313 return ap->ops->check_atapi_dma(qc);
4314
4315 return 0;
4316 }
4317
4318 /**
4319 * ata_std_qc_defer - Check whether a qc needs to be deferred
4320 * @qc: ATA command in question
4321 *
4322 * Non-NCQ commands cannot run with any other command, NCQ or
4323 * not. As upper layer only knows the queue depth, we are
4324 * responsible for maintaining exclusion. This function checks
4325 * whether a new command @qc can be issued.
4326 *
4327 * LOCKING:
4328 * spin_lock_irqsave(host lock)
4329 *
4330 * RETURNS:
4331 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4332 */
4333 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4334 {
4335 struct ata_link *link = qc->dev->link;
4336
4337 if (qc->tf.protocol == ATA_PROT_NCQ) {
4338 if (!ata_tag_valid(link->active_tag))
4339 return 0;
4340 } else {
4341 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4342 return 0;
4343 }
4344
4345 return ATA_DEFER_LINK;
4346 }
4347
4348 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4349
4350 /**
4351 * ata_sg_init - Associate command with scatter-gather table.
4352 * @qc: Command to be associated
4353 * @sg: Scatter-gather table.
4354 * @n_elem: Number of elements in s/g table.
4355 *
4356 * Initialize the data-related elements of queued_cmd @qc
4357 * to point to a scatter-gather table @sg, containing @n_elem
4358 * elements.
4359 *
4360 * LOCKING:
4361 * spin_lock_irqsave(host lock)
4362 */
4363 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4364 unsigned int n_elem)
4365 {
4366 qc->sg = sg;
4367 qc->n_elem = n_elem;
4368 qc->cursg = qc->sg;
4369 }
4370
4371 /**
4372 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4373 * @qc: Command with scatter-gather table to be mapped.
4374 *
4375 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4376 *
4377 * LOCKING:
4378 * spin_lock_irqsave(host lock)
4379 *
4380 * RETURNS:
4381 * Zero on success, negative on error.
4382 *
4383 */
4384 static int ata_sg_setup(struct ata_queued_cmd *qc)
4385 {
4386 struct ata_port *ap = qc->ap;
4387 unsigned int n_elem;
4388
4389 VPRINTK("ENTER, ata%u\n", ap->print_id);
4390
4391 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4392 if (n_elem < 1)
4393 return -1;
4394
4395 DPRINTK("%d sg elements mapped\n", n_elem);
4396
4397 qc->n_elem = n_elem;
4398 qc->flags |= ATA_QCFLAG_DMAMAP;
4399
4400 return 0;
4401 }
4402
4403 /**
4404 * swap_buf_le16 - swap halves of 16-bit words in place
4405 * @buf: Buffer to swap
4406 * @buf_words: Number of 16-bit words in buffer.
4407 *
4408 * Swap halves of 16-bit words if needed to convert from
4409 * little-endian byte order to native cpu byte order, or
4410 * vice-versa.
4411 *
4412 * LOCKING:
4413 * Inherited from caller.
4414 */
4415 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4416 {
4417 #ifdef __BIG_ENDIAN
4418 unsigned int i;
4419
4420 for (i = 0; i < buf_words; i++)
4421 buf[i] = le16_to_cpu(buf[i]);
4422 #endif /* __BIG_ENDIAN */
4423 }
4424
4425 /**
4426 * ata_qc_new - Request an available ATA command, for queueing
4427 * @ap: Port associated with device @dev
4428 * @dev: Device from whom we request an available command structure
4429 *
4430 * LOCKING:
4431 * None.
4432 */
4433
4434 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4435 {
4436 struct ata_queued_cmd *qc = NULL;
4437 unsigned int i;
4438
4439 /* no command while frozen */
4440 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4441 return NULL;
4442
4443 /* the last tag is reserved for internal command. */
4444 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4445 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4446 qc = __ata_qc_from_tag(ap, i);
4447 break;
4448 }
4449
4450 if (qc)
4451 qc->tag = i;
4452
4453 return qc;
4454 }
4455
4456 /**
4457 * ata_qc_new_init - Request an available ATA command, and initialize it
4458 * @dev: Device from whom we request an available command structure
4459 *
4460 * LOCKING:
4461 * None.
4462 */
4463
4464 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4465 {
4466 struct ata_port *ap = dev->link->ap;
4467 struct ata_queued_cmd *qc;
4468
4469 qc = ata_qc_new(ap);
4470 if (qc) {
4471 qc->scsicmd = NULL;
4472 qc->ap = ap;
4473 qc->dev = dev;
4474
4475 ata_qc_reinit(qc);
4476 }
4477
4478 return qc;
4479 }
4480
4481 /**
4482 * ata_qc_free - free unused ata_queued_cmd
4483 * @qc: Command to complete
4484 *
4485 * Designed to free unused ata_queued_cmd object
4486 * in case something prevents using it.
4487 *
4488 * LOCKING:
4489 * spin_lock_irqsave(host lock)
4490 */
4491 void ata_qc_free(struct ata_queued_cmd *qc)
4492 {
4493 struct ata_port *ap = qc->ap;
4494 unsigned int tag;
4495
4496 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4497
4498 qc->flags = 0;
4499 tag = qc->tag;
4500 if (likely(ata_tag_valid(tag))) {
4501 qc->tag = ATA_TAG_POISON;
4502 clear_bit(tag, &ap->qc_allocated);
4503 }
4504 }
4505
4506 void __ata_qc_complete(struct ata_queued_cmd *qc)
4507 {
4508 struct ata_port *ap = qc->ap;
4509 struct ata_link *link = qc->dev->link;
4510
4511 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4512 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4513
4514 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4515 ata_sg_clean(qc);
4516
4517 /* command should be marked inactive atomically with qc completion */
4518 if (qc->tf.protocol == ATA_PROT_NCQ) {
4519 link->sactive &= ~(1 << qc->tag);
4520 if (!link->sactive)
4521 ap->nr_active_links--;
4522 } else {
4523 link->active_tag = ATA_TAG_POISON;
4524 ap->nr_active_links--;
4525 }
4526
4527 /* clear exclusive status */
4528 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4529 ap->excl_link == link))
4530 ap->excl_link = NULL;
4531
4532 /* atapi: mark qc as inactive to prevent the interrupt handler
4533 * from completing the command twice later, before the error handler
4534 * is called. (when rc != 0 and atapi request sense is needed)
4535 */
4536 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4537 ap->qc_active &= ~(1 << qc->tag);
4538
4539 /* call completion callback */
4540 qc->complete_fn(qc);
4541 }
4542
4543 static void fill_result_tf(struct ata_queued_cmd *qc)
4544 {
4545 struct ata_port *ap = qc->ap;
4546
4547 qc->result_tf.flags = qc->tf.flags;
4548 ap->ops->qc_fill_rtf(qc);
4549 }
4550
4551 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4552 {
4553 struct ata_device *dev = qc->dev;
4554
4555 if (ata_tag_internal(qc->tag))
4556 return;
4557
4558 if (ata_is_nodata(qc->tf.protocol))
4559 return;
4560
4561 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4562 return;
4563
4564 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4565 }
4566
4567 /**
4568 * ata_qc_complete - Complete an active ATA command
4569 * @qc: Command to complete
4570 * @err_mask: ATA Status register contents
4571 *
4572 * Indicate to the mid and upper layers that an ATA
4573 * command has completed, with either an ok or not-ok status.
4574 *
4575 * LOCKING:
4576 * spin_lock_irqsave(host lock)
4577 */
4578 void ata_qc_complete(struct ata_queued_cmd *qc)
4579 {
4580 struct ata_port *ap = qc->ap;
4581
4582 /* XXX: New EH and old EH use different mechanisms to
4583 * synchronize EH with regular execution path.
4584 *
4585 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4586 * Normal execution path is responsible for not accessing a
4587 * failed qc. libata core enforces the rule by returning NULL
4588 * from ata_qc_from_tag() for failed qcs.
4589 *
4590 * Old EH depends on ata_qc_complete() nullifying completion
4591 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4592 * not synchronize with interrupt handler. Only PIO task is
4593 * taken care of.
4594 */
4595 if (ap->ops->error_handler) {
4596 struct ata_device *dev = qc->dev;
4597 struct ata_eh_info *ehi = &dev->link->eh_info;
4598
4599 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4600
4601 if (unlikely(qc->err_mask))
4602 qc->flags |= ATA_QCFLAG_FAILED;
4603
4604 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4605 if (!ata_tag_internal(qc->tag)) {
4606 /* always fill result TF for failed qc */
4607 fill_result_tf(qc);
4608 ata_qc_schedule_eh(qc);
4609 return;
4610 }
4611 }
4612
4613 /* read result TF if requested */
4614 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4615 fill_result_tf(qc);
4616
4617 /* Some commands need post-processing after successful
4618 * completion.
4619 */
4620 switch (qc->tf.command) {
4621 case ATA_CMD_SET_FEATURES:
4622 if (qc->tf.feature != SETFEATURES_WC_ON &&
4623 qc->tf.feature != SETFEATURES_WC_OFF)
4624 break;
4625 /* fall through */
4626 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4627 case ATA_CMD_SET_MULTI: /* multi_count changed */
4628 /* revalidate device */
4629 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4630 ata_port_schedule_eh(ap);
4631 break;
4632
4633 case ATA_CMD_SLEEP:
4634 dev->flags |= ATA_DFLAG_SLEEPING;
4635 break;
4636 }
4637
4638 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4639 ata_verify_xfer(qc);
4640
4641 __ata_qc_complete(qc);
4642 } else {
4643 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4644 return;
4645
4646 /* read result TF if failed or requested */
4647 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4648 fill_result_tf(qc);
4649
4650 __ata_qc_complete(qc);
4651 }
4652 }
4653
4654 /**
4655 * ata_qc_complete_multiple - Complete multiple qcs successfully
4656 * @ap: port in question
4657 * @qc_active: new qc_active mask
4658 *
4659 * Complete in-flight commands. This functions is meant to be
4660 * called from low-level driver's interrupt routine to complete
4661 * requests normally. ap->qc_active and @qc_active is compared
4662 * and commands are completed accordingly.
4663 *
4664 * LOCKING:
4665 * spin_lock_irqsave(host lock)
4666 *
4667 * RETURNS:
4668 * Number of completed commands on success, -errno otherwise.
4669 */
4670 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4671 {
4672 int nr_done = 0;
4673 u32 done_mask;
4674 int i;
4675
4676 done_mask = ap->qc_active ^ qc_active;
4677
4678 if (unlikely(done_mask & qc_active)) {
4679 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4680 "(%08x->%08x)\n", ap->qc_active, qc_active);
4681 return -EINVAL;
4682 }
4683
4684 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4685 struct ata_queued_cmd *qc;
4686
4687 if (!(done_mask & (1 << i)))
4688 continue;
4689
4690 if ((qc = ata_qc_from_tag(ap, i))) {
4691 ata_qc_complete(qc);
4692 nr_done++;
4693 }
4694 }
4695
4696 return nr_done;
4697 }
4698
4699 /**
4700 * ata_qc_issue - issue taskfile to device
4701 * @qc: command to issue to device
4702 *
4703 * Prepare an ATA command to submission to device.
4704 * This includes mapping the data into a DMA-able
4705 * area, filling in the S/G table, and finally
4706 * writing the taskfile to hardware, starting the command.
4707 *
4708 * LOCKING:
4709 * spin_lock_irqsave(host lock)
4710 */
4711 void ata_qc_issue(struct ata_queued_cmd *qc)
4712 {
4713 struct ata_port *ap = qc->ap;
4714 struct ata_link *link = qc->dev->link;
4715 u8 prot = qc->tf.protocol;
4716
4717 /* Make sure only one non-NCQ command is outstanding. The
4718 * check is skipped for old EH because it reuses active qc to
4719 * request ATAPI sense.
4720 */
4721 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4722
4723 if (ata_is_ncq(prot)) {
4724 WARN_ON(link->sactive & (1 << qc->tag));
4725
4726 if (!link->sactive)
4727 ap->nr_active_links++;
4728 link->sactive |= 1 << qc->tag;
4729 } else {
4730 WARN_ON(link->sactive);
4731
4732 ap->nr_active_links++;
4733 link->active_tag = qc->tag;
4734 }
4735
4736 qc->flags |= ATA_QCFLAG_ACTIVE;
4737 ap->qc_active |= 1 << qc->tag;
4738
4739 /* We guarantee to LLDs that they will have at least one
4740 * non-zero sg if the command is a data command.
4741 */
4742 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4743
4744 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4745 (ap->flags & ATA_FLAG_PIO_DMA)))
4746 if (ata_sg_setup(qc))
4747 goto sg_err;
4748
4749 /* if device is sleeping, schedule reset and abort the link */
4750 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4751 link->eh_info.action |= ATA_EH_RESET;
4752 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4753 ata_link_abort(link);
4754 return;
4755 }
4756
4757 ap->ops->qc_prep(qc);
4758
4759 qc->err_mask |= ap->ops->qc_issue(qc);
4760 if (unlikely(qc->err_mask))
4761 goto err;
4762 return;
4763
4764 sg_err:
4765 qc->err_mask |= AC_ERR_SYSTEM;
4766 err:
4767 ata_qc_complete(qc);
4768 }
4769
4770 /**
4771 * sata_scr_valid - test whether SCRs are accessible
4772 * @link: ATA link to test SCR accessibility for
4773 *
4774 * Test whether SCRs are accessible for @link.
4775 *
4776 * LOCKING:
4777 * None.
4778 *
4779 * RETURNS:
4780 * 1 if SCRs are accessible, 0 otherwise.
4781 */
4782 int sata_scr_valid(struct ata_link *link)
4783 {
4784 struct ata_port *ap = link->ap;
4785
4786 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4787 }
4788
4789 /**
4790 * sata_scr_read - read SCR register of the specified port
4791 * @link: ATA link to read SCR for
4792 * @reg: SCR to read
4793 * @val: Place to store read value
4794 *
4795 * Read SCR register @reg of @link into *@val. This function is
4796 * guaranteed to succeed if @link is ap->link, the cable type of
4797 * the port is SATA and the port implements ->scr_read.
4798 *
4799 * LOCKING:
4800 * None if @link is ap->link. Kernel thread context otherwise.
4801 *
4802 * RETURNS:
4803 * 0 on success, negative errno on failure.
4804 */
4805 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4806 {
4807 if (ata_is_host_link(link)) {
4808 struct ata_port *ap = link->ap;
4809
4810 if (sata_scr_valid(link))
4811 return ap->ops->scr_read(ap, reg, val);
4812 return -EOPNOTSUPP;
4813 }
4814
4815 return sata_pmp_scr_read(link, reg, val);
4816 }
4817
4818 /**
4819 * sata_scr_write - write SCR register of the specified port
4820 * @link: ATA link to write SCR for
4821 * @reg: SCR to write
4822 * @val: value to write
4823 *
4824 * Write @val to SCR register @reg of @link. This function is
4825 * guaranteed to succeed if @link is ap->link, the cable type of
4826 * the port is SATA and the port implements ->scr_read.
4827 *
4828 * LOCKING:
4829 * None if @link is ap->link. Kernel thread context otherwise.
4830 *
4831 * RETURNS:
4832 * 0 on success, negative errno on failure.
4833 */
4834 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4835 {
4836 if (ata_is_host_link(link)) {
4837 struct ata_port *ap = link->ap;
4838
4839 if (sata_scr_valid(link))
4840 return ap->ops->scr_write(ap, reg, val);
4841 return -EOPNOTSUPP;
4842 }
4843
4844 return sata_pmp_scr_write(link, reg, val);
4845 }
4846
4847 /**
4848 * sata_scr_write_flush - write SCR register of the specified port and flush
4849 * @link: ATA link to write SCR for
4850 * @reg: SCR to write
4851 * @val: value to write
4852 *
4853 * This function is identical to sata_scr_write() except that this
4854 * function performs flush after writing to the register.
4855 *
4856 * LOCKING:
4857 * None if @link is ap->link. Kernel thread context otherwise.
4858 *
4859 * RETURNS:
4860 * 0 on success, negative errno on failure.
4861 */
4862 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4863 {
4864 if (ata_is_host_link(link)) {
4865 struct ata_port *ap = link->ap;
4866 int rc;
4867
4868 if (sata_scr_valid(link)) {
4869 rc = ap->ops->scr_write(ap, reg, val);
4870 if (rc == 0)
4871 rc = ap->ops->scr_read(ap, reg, &val);
4872 return rc;
4873 }
4874 return -EOPNOTSUPP;
4875 }
4876
4877 return sata_pmp_scr_write(link, reg, val);
4878 }
4879
4880 /**
4881 * ata_link_online - test whether the given link is online
4882 * @link: ATA link to test
4883 *
4884 * Test whether @link is online. Note that this function returns
4885 * 0 if online status of @link cannot be obtained, so
4886 * ata_link_online(link) != !ata_link_offline(link).
4887 *
4888 * LOCKING:
4889 * None.
4890 *
4891 * RETURNS:
4892 * 1 if the port online status is available and online.
4893 */
4894 int ata_link_online(struct ata_link *link)
4895 {
4896 u32 sstatus;
4897
4898 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4899 (sstatus & 0xf) == 0x3)
4900 return 1;
4901 return 0;
4902 }
4903
4904 /**
4905 * ata_link_offline - test whether the given link is offline
4906 * @link: ATA link to test
4907 *
4908 * Test whether @link is offline. Note that this function
4909 * returns 0 if offline status of @link cannot be obtained, so
4910 * ata_link_online(link) != !ata_link_offline(link).
4911 *
4912 * LOCKING:
4913 * None.
4914 *
4915 * RETURNS:
4916 * 1 if the port offline status is available and offline.
4917 */
4918 int ata_link_offline(struct ata_link *link)
4919 {
4920 u32 sstatus;
4921
4922 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4923 (sstatus & 0xf) != 0x3)
4924 return 1;
4925 return 0;
4926 }
4927
4928 #ifdef CONFIG_PM
4929 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4930 unsigned int action, unsigned int ehi_flags,
4931 int wait)
4932 {
4933 unsigned long flags;
4934 int i, rc;
4935
4936 for (i = 0; i < host->n_ports; i++) {
4937 struct ata_port *ap = host->ports[i];
4938 struct ata_link *link;
4939
4940 /* Previous resume operation might still be in
4941 * progress. Wait for PM_PENDING to clear.
4942 */
4943 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4944 ata_port_wait_eh(ap);
4945 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4946 }
4947
4948 /* request PM ops to EH */
4949 spin_lock_irqsave(ap->lock, flags);
4950
4951 ap->pm_mesg = mesg;
4952 if (wait) {
4953 rc = 0;
4954 ap->pm_result = &rc;
4955 }
4956
4957 ap->pflags |= ATA_PFLAG_PM_PENDING;
4958 __ata_port_for_each_link(link, ap) {
4959 link->eh_info.action |= action;
4960 link->eh_info.flags |= ehi_flags;
4961 }
4962
4963 ata_port_schedule_eh(ap);
4964
4965 spin_unlock_irqrestore(ap->lock, flags);
4966
4967 /* wait and check result */
4968 if (wait) {
4969 ata_port_wait_eh(ap);
4970 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4971 if (rc)
4972 return rc;
4973 }
4974 }
4975
4976 return 0;
4977 }
4978
4979 /**
4980 * ata_host_suspend - suspend host
4981 * @host: host to suspend
4982 * @mesg: PM message
4983 *
4984 * Suspend @host. Actual operation is performed by EH. This
4985 * function requests EH to perform PM operations and waits for EH
4986 * to finish.
4987 *
4988 * LOCKING:
4989 * Kernel thread context (may sleep).
4990 *
4991 * RETURNS:
4992 * 0 on success, -errno on failure.
4993 */
4994 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
4995 {
4996 int rc;
4997
4998 /*
4999 * disable link pm on all ports before requesting
5000 * any pm activity
5001 */
5002 ata_lpm_enable(host);
5003
5004 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5005 if (rc == 0)
5006 host->dev->power.power_state = mesg;
5007 return rc;
5008 }
5009
5010 /**
5011 * ata_host_resume - resume host
5012 * @host: host to resume
5013 *
5014 * Resume @host. Actual operation is performed by EH. This
5015 * function requests EH to perform PM operations and returns.
5016 * Note that all resume operations are performed parallely.
5017 *
5018 * LOCKING:
5019 * Kernel thread context (may sleep).
5020 */
5021 void ata_host_resume(struct ata_host *host)
5022 {
5023 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5024 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5025 host->dev->power.power_state = PMSG_ON;
5026
5027 /* reenable link pm */
5028 ata_lpm_disable(host);
5029 }
5030 #endif
5031
5032 /**
5033 * ata_port_start - Set port up for dma.
5034 * @ap: Port to initialize
5035 *
5036 * Called just after data structures for each port are
5037 * initialized. Allocates space for PRD table.
5038 *
5039 * May be used as the port_start() entry in ata_port_operations.
5040 *
5041 * LOCKING:
5042 * Inherited from caller.
5043 */
5044 int ata_port_start(struct ata_port *ap)
5045 {
5046 struct device *dev = ap->dev;
5047
5048 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5049 GFP_KERNEL);
5050 if (!ap->prd)
5051 return -ENOMEM;
5052
5053 return 0;
5054 }
5055
5056 /**
5057 * ata_dev_init - Initialize an ata_device structure
5058 * @dev: Device structure to initialize
5059 *
5060 * Initialize @dev in preparation for probing.
5061 *
5062 * LOCKING:
5063 * Inherited from caller.
5064 */
5065 void ata_dev_init(struct ata_device *dev)
5066 {
5067 struct ata_link *link = dev->link;
5068 struct ata_port *ap = link->ap;
5069 unsigned long flags;
5070
5071 /* SATA spd limit is bound to the first device */
5072 link->sata_spd_limit = link->hw_sata_spd_limit;
5073 link->sata_spd = 0;
5074
5075 /* High bits of dev->flags are used to record warm plug
5076 * requests which occur asynchronously. Synchronize using
5077 * host lock.
5078 */
5079 spin_lock_irqsave(ap->lock, flags);
5080 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5081 dev->horkage = 0;
5082 spin_unlock_irqrestore(ap->lock, flags);
5083
5084 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5085 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5086 dev->pio_mask = UINT_MAX;
5087 dev->mwdma_mask = UINT_MAX;
5088 dev->udma_mask = UINT_MAX;
5089 }
5090
5091 /**
5092 * ata_link_init - Initialize an ata_link structure
5093 * @ap: ATA port link is attached to
5094 * @link: Link structure to initialize
5095 * @pmp: Port multiplier port number
5096 *
5097 * Initialize @link.
5098 *
5099 * LOCKING:
5100 * Kernel thread context (may sleep)
5101 */
5102 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5103 {
5104 int i;
5105
5106 /* clear everything except for devices */
5107 memset(link, 0, offsetof(struct ata_link, device[0]));
5108
5109 link->ap = ap;
5110 link->pmp = pmp;
5111 link->active_tag = ATA_TAG_POISON;
5112 link->hw_sata_spd_limit = UINT_MAX;
5113
5114 /* can't use iterator, ap isn't initialized yet */
5115 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5116 struct ata_device *dev = &link->device[i];
5117
5118 dev->link = link;
5119 dev->devno = dev - link->device;
5120 ata_dev_init(dev);
5121 }
5122 }
5123
5124 /**
5125 * sata_link_init_spd - Initialize link->sata_spd_limit
5126 * @link: Link to configure sata_spd_limit for
5127 *
5128 * Initialize @link->[hw_]sata_spd_limit to the currently
5129 * configured value.
5130 *
5131 * LOCKING:
5132 * Kernel thread context (may sleep).
5133 *
5134 * RETURNS:
5135 * 0 on success, -errno on failure.
5136 */
5137 int sata_link_init_spd(struct ata_link *link)
5138 {
5139 u32 scontrol;
5140 u8 spd;
5141 int rc;
5142
5143 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5144 if (rc)
5145 return rc;
5146
5147 spd = (scontrol >> 4) & 0xf;
5148 if (spd)
5149 link->hw_sata_spd_limit &= (1 << spd) - 1;
5150
5151 ata_force_spd_limit(link);
5152
5153 link->sata_spd_limit = link->hw_sata_spd_limit;
5154
5155 return 0;
5156 }
5157
5158 /**
5159 * ata_port_alloc - allocate and initialize basic ATA port resources
5160 * @host: ATA host this allocated port belongs to
5161 *
5162 * Allocate and initialize basic ATA port resources.
5163 *
5164 * RETURNS:
5165 * Allocate ATA port on success, NULL on failure.
5166 *
5167 * LOCKING:
5168 * Inherited from calling layer (may sleep).
5169 */
5170 struct ata_port *ata_port_alloc(struct ata_host *host)
5171 {
5172 struct ata_port *ap;
5173
5174 DPRINTK("ENTER\n");
5175
5176 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5177 if (!ap)
5178 return NULL;
5179
5180 ap->pflags |= ATA_PFLAG_INITIALIZING;
5181 ap->lock = &host->lock;
5182 ap->flags = ATA_FLAG_DISABLED;
5183 ap->print_id = -1;
5184 ap->ctl = ATA_DEVCTL_OBS;
5185 ap->host = host;
5186 ap->dev = host->dev;
5187 ap->last_ctl = 0xFF;
5188
5189 #if defined(ATA_VERBOSE_DEBUG)
5190 /* turn on all debugging levels */
5191 ap->msg_enable = 0x00FF;
5192 #elif defined(ATA_DEBUG)
5193 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5194 #else
5195 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5196 #endif
5197
5198 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5199 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5200 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5201 INIT_LIST_HEAD(&ap->eh_done_q);
5202 init_waitqueue_head(&ap->eh_wait_q);
5203 init_timer_deferrable(&ap->fastdrain_timer);
5204 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5205 ap->fastdrain_timer.data = (unsigned long)ap;
5206
5207 ap->cbl = ATA_CBL_NONE;
5208
5209 ata_link_init(ap, &ap->link, 0);
5210
5211 #ifdef ATA_IRQ_TRAP
5212 ap->stats.unhandled_irq = 1;
5213 ap->stats.idle_irq = 1;
5214 #endif
5215 return ap;
5216 }
5217
5218 static void ata_host_release(struct device *gendev, void *res)
5219 {
5220 struct ata_host *host = dev_get_drvdata(gendev);
5221 int i;
5222
5223 for (i = 0; i < host->n_ports; i++) {
5224 struct ata_port *ap = host->ports[i];
5225
5226 if (!ap)
5227 continue;
5228
5229 if (ap->scsi_host)
5230 scsi_host_put(ap->scsi_host);
5231
5232 kfree(ap->pmp_link);
5233 kfree(ap);
5234 host->ports[i] = NULL;
5235 }
5236
5237 dev_set_drvdata(gendev, NULL);
5238 }
5239
5240 /**
5241 * ata_host_alloc - allocate and init basic ATA host resources
5242 * @dev: generic device this host is associated with
5243 * @max_ports: maximum number of ATA ports associated with this host
5244 *
5245 * Allocate and initialize basic ATA host resources. LLD calls
5246 * this function to allocate a host, initializes it fully and
5247 * attaches it using ata_host_register().
5248 *
5249 * @max_ports ports are allocated and host->n_ports is
5250 * initialized to @max_ports. The caller is allowed to decrease
5251 * host->n_ports before calling ata_host_register(). The unused
5252 * ports will be automatically freed on registration.
5253 *
5254 * RETURNS:
5255 * Allocate ATA host on success, NULL on failure.
5256 *
5257 * LOCKING:
5258 * Inherited from calling layer (may sleep).
5259 */
5260 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5261 {
5262 struct ata_host *host;
5263 size_t sz;
5264 int i;
5265
5266 DPRINTK("ENTER\n");
5267
5268 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5269 return NULL;
5270
5271 /* alloc a container for our list of ATA ports (buses) */
5272 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5273 /* alloc a container for our list of ATA ports (buses) */
5274 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5275 if (!host)
5276 goto err_out;
5277
5278 devres_add(dev, host);
5279 dev_set_drvdata(dev, host);
5280
5281 spin_lock_init(&host->lock);
5282 host->dev = dev;
5283 host->n_ports = max_ports;
5284
5285 /* allocate ports bound to this host */
5286 for (i = 0; i < max_ports; i++) {
5287 struct ata_port *ap;
5288
5289 ap = ata_port_alloc(host);
5290 if (!ap)
5291 goto err_out;
5292
5293 ap->port_no = i;
5294 host->ports[i] = ap;
5295 }
5296
5297 devres_remove_group(dev, NULL);
5298 return host;
5299
5300 err_out:
5301 devres_release_group(dev, NULL);
5302 return NULL;
5303 }
5304
5305 /**
5306 * ata_host_alloc_pinfo - alloc host and init with port_info array
5307 * @dev: generic device this host is associated with
5308 * @ppi: array of ATA port_info to initialize host with
5309 * @n_ports: number of ATA ports attached to this host
5310 *
5311 * Allocate ATA host and initialize with info from @ppi. If NULL
5312 * terminated, @ppi may contain fewer entries than @n_ports. The
5313 * last entry will be used for the remaining ports.
5314 *
5315 * RETURNS:
5316 * Allocate ATA host on success, NULL on failure.
5317 *
5318 * LOCKING:
5319 * Inherited from calling layer (may sleep).
5320 */
5321 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5322 const struct ata_port_info * const * ppi,
5323 int n_ports)
5324 {
5325 const struct ata_port_info *pi;
5326 struct ata_host *host;
5327 int i, j;
5328
5329 host = ata_host_alloc(dev, n_ports);
5330 if (!host)
5331 return NULL;
5332
5333 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5334 struct ata_port *ap = host->ports[i];
5335
5336 if (ppi[j])
5337 pi = ppi[j++];
5338
5339 ap->pio_mask = pi->pio_mask;
5340 ap->mwdma_mask = pi->mwdma_mask;
5341 ap->udma_mask = pi->udma_mask;
5342 ap->flags |= pi->flags;
5343 ap->link.flags |= pi->link_flags;
5344 ap->ops = pi->port_ops;
5345
5346 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5347 host->ops = pi->port_ops;
5348 }
5349
5350 return host;
5351 }
5352
5353 static void ata_host_stop(struct device *gendev, void *res)
5354 {
5355 struct ata_host *host = dev_get_drvdata(gendev);
5356 int i;
5357
5358 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5359
5360 for (i = 0; i < host->n_ports; i++) {
5361 struct ata_port *ap = host->ports[i];
5362
5363 if (ap->ops->port_stop)
5364 ap->ops->port_stop(ap);
5365 }
5366
5367 if (host->ops->host_stop)
5368 host->ops->host_stop(host);
5369 }
5370
5371 /**
5372 * ata_finalize_port_ops - finalize ata_port_operations
5373 * @ops: ata_port_operations to finalize
5374 *
5375 * An ata_port_operations can inherit from another ops and that
5376 * ops can again inherit from another. This can go on as many
5377 * times as necessary as long as there is no loop in the
5378 * inheritance chain.
5379 *
5380 * Ops tables are finalized when the host is started. NULL or
5381 * unspecified entries are inherited from the closet ancestor
5382 * which has the method and the entry is populated with it.
5383 * After finalization, the ops table directly points to all the
5384 * methods and ->inherits is no longer necessary and cleared.
5385 *
5386 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5387 *
5388 * LOCKING:
5389 * None.
5390 */
5391 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5392 {
5393 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
5394 const struct ata_port_operations *cur;
5395 void **begin = (void **)ops;
5396 void **end = (void **)&ops->inherits;
5397 void **pp;
5398
5399 if (!ops || !ops->inherits)
5400 return;
5401
5402 spin_lock(&lock);
5403
5404 for (cur = ops->inherits; cur; cur = cur->inherits) {
5405 void **inherit = (void **)cur;
5406
5407 for (pp = begin; pp < end; pp++, inherit++)
5408 if (!*pp)
5409 *pp = *inherit;
5410 }
5411
5412 for (pp = begin; pp < end; pp++)
5413 if (IS_ERR(*pp))
5414 *pp = NULL;
5415
5416 ops->inherits = NULL;
5417
5418 spin_unlock(&lock);
5419 }
5420
5421 /**
5422 * ata_host_start - start and freeze ports of an ATA host
5423 * @host: ATA host to start ports for
5424 *
5425 * Start and then freeze ports of @host. Started status is
5426 * recorded in host->flags, so this function can be called
5427 * multiple times. Ports are guaranteed to get started only
5428 * once. If host->ops isn't initialized yet, its set to the
5429 * first non-dummy port ops.
5430 *
5431 * LOCKING:
5432 * Inherited from calling layer (may sleep).
5433 *
5434 * RETURNS:
5435 * 0 if all ports are started successfully, -errno otherwise.
5436 */
5437 int ata_host_start(struct ata_host *host)
5438 {
5439 int have_stop = 0;
5440 void *start_dr = NULL;
5441 int i, rc;
5442
5443 if (host->flags & ATA_HOST_STARTED)
5444 return 0;
5445
5446 ata_finalize_port_ops(host->ops);
5447
5448 for (i = 0; i < host->n_ports; i++) {
5449 struct ata_port *ap = host->ports[i];
5450
5451 ata_finalize_port_ops(ap->ops);
5452
5453 if (!host->ops && !ata_port_is_dummy(ap))
5454 host->ops = ap->ops;
5455
5456 if (ap->ops->port_stop)
5457 have_stop = 1;
5458 }
5459
5460 if (host->ops->host_stop)
5461 have_stop = 1;
5462
5463 if (have_stop) {
5464 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5465 if (!start_dr)
5466 return -ENOMEM;
5467 }
5468
5469 for (i = 0; i < host->n_ports; i++) {
5470 struct ata_port *ap = host->ports[i];
5471
5472 if (ap->ops->port_start) {
5473 rc = ap->ops->port_start(ap);
5474 if (rc) {
5475 if (rc != -ENODEV)
5476 dev_printk(KERN_ERR, host->dev,
5477 "failed to start port %d "
5478 "(errno=%d)\n", i, rc);
5479 goto err_out;
5480 }
5481 }
5482 ata_eh_freeze_port(ap);
5483 }
5484
5485 if (start_dr)
5486 devres_add(host->dev, start_dr);
5487 host->flags |= ATA_HOST_STARTED;
5488 return 0;
5489
5490 err_out:
5491 while (--i >= 0) {
5492 struct ata_port *ap = host->ports[i];
5493
5494 if (ap->ops->port_stop)
5495 ap->ops->port_stop(ap);
5496 }
5497 devres_free(start_dr);
5498 return rc;
5499 }
5500
5501 /**
5502 * ata_sas_host_init - Initialize a host struct
5503 * @host: host to initialize
5504 * @dev: device host is attached to
5505 * @flags: host flags
5506 * @ops: port_ops
5507 *
5508 * LOCKING:
5509 * PCI/etc. bus probe sem.
5510 *
5511 */
5512 /* KILLME - the only user left is ipr */
5513 void ata_host_init(struct ata_host *host, struct device *dev,
5514 unsigned long flags, struct ata_port_operations *ops)
5515 {
5516 spin_lock_init(&host->lock);
5517 host->dev = dev;
5518 host->flags = flags;
5519 host->ops = ops;
5520 }
5521
5522 /**
5523 * ata_host_register - register initialized ATA host
5524 * @host: ATA host to register
5525 * @sht: template for SCSI host
5526 *
5527 * Register initialized ATA host. @host is allocated using
5528 * ata_host_alloc() and fully initialized by LLD. This function
5529 * starts ports, registers @host with ATA and SCSI layers and
5530 * probe registered devices.
5531 *
5532 * LOCKING:
5533 * Inherited from calling layer (may sleep).
5534 *
5535 * RETURNS:
5536 * 0 on success, -errno otherwise.
5537 */
5538 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5539 {
5540 int i, rc;
5541
5542 /* host must have been started */
5543 if (!(host->flags & ATA_HOST_STARTED)) {
5544 dev_printk(KERN_ERR, host->dev,
5545 "BUG: trying to register unstarted host\n");
5546 WARN_ON(1);
5547 return -EINVAL;
5548 }
5549
5550 /* Blow away unused ports. This happens when LLD can't
5551 * determine the exact number of ports to allocate at
5552 * allocation time.
5553 */
5554 for (i = host->n_ports; host->ports[i]; i++)
5555 kfree(host->ports[i]);
5556
5557 /* give ports names and add SCSI hosts */
5558 for (i = 0; i < host->n_ports; i++)
5559 host->ports[i]->print_id = ata_print_id++;
5560
5561 rc = ata_scsi_add_hosts(host, sht);
5562 if (rc)
5563 return rc;
5564
5565 /* associate with ACPI nodes */
5566 ata_acpi_associate(host);
5567
5568 /* set cable, sata_spd_limit and report */
5569 for (i = 0; i < host->n_ports; i++) {
5570 struct ata_port *ap = host->ports[i];
5571 unsigned long xfer_mask;
5572
5573 /* set SATA cable type if still unset */
5574 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5575 ap->cbl = ATA_CBL_SATA;
5576
5577 /* init sata_spd_limit to the current value */
5578 sata_link_init_spd(&ap->link);
5579
5580 /* print per-port info to dmesg */
5581 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5582 ap->udma_mask);
5583
5584 if (!ata_port_is_dummy(ap)) {
5585 ata_port_printk(ap, KERN_INFO,
5586 "%cATA max %s %s\n",
5587 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5588 ata_mode_string(xfer_mask),
5589 ap->link.eh_info.desc);
5590 ata_ehi_clear_desc(&ap->link.eh_info);
5591 } else
5592 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5593 }
5594
5595 /* perform each probe synchronously */
5596 DPRINTK("probe begin\n");
5597 for (i = 0; i < host->n_ports; i++) {
5598 struct ata_port *ap = host->ports[i];
5599
5600 /* probe */
5601 if (ap->ops->error_handler) {
5602 struct ata_eh_info *ehi = &ap->link.eh_info;
5603 unsigned long flags;
5604
5605 ata_port_probe(ap);
5606
5607 /* kick EH for boot probing */
5608 spin_lock_irqsave(ap->lock, flags);
5609
5610 ehi->probe_mask |= ATA_ALL_DEVICES;
5611 ehi->action |= ATA_EH_RESET;
5612 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5613
5614 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5615 ap->pflags |= ATA_PFLAG_LOADING;
5616 ata_port_schedule_eh(ap);
5617
5618 spin_unlock_irqrestore(ap->lock, flags);
5619
5620 /* wait for EH to finish */
5621 ata_port_wait_eh(ap);
5622 } else {
5623 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5624 rc = ata_bus_probe(ap);
5625 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5626
5627 if (rc) {
5628 /* FIXME: do something useful here?
5629 * Current libata behavior will
5630 * tear down everything when
5631 * the module is removed
5632 * or the h/w is unplugged.
5633 */
5634 }
5635 }
5636 }
5637
5638 /* probes are done, now scan each port's disk(s) */
5639 DPRINTK("host probe begin\n");
5640 for (i = 0; i < host->n_ports; i++) {
5641 struct ata_port *ap = host->ports[i];
5642
5643 ata_scsi_scan_host(ap, 1);
5644 ata_lpm_schedule(ap, ap->pm_policy);
5645 }
5646
5647 return 0;
5648 }
5649
5650 /**
5651 * ata_host_activate - start host, request IRQ and register it
5652 * @host: target ATA host
5653 * @irq: IRQ to request
5654 * @irq_handler: irq_handler used when requesting IRQ
5655 * @irq_flags: irq_flags used when requesting IRQ
5656 * @sht: scsi_host_template to use when registering the host
5657 *
5658 * After allocating an ATA host and initializing it, most libata
5659 * LLDs perform three steps to activate the host - start host,
5660 * request IRQ and register it. This helper takes necessasry
5661 * arguments and performs the three steps in one go.
5662 *
5663 * An invalid IRQ skips the IRQ registration and expects the host to
5664 * have set polling mode on the port. In this case, @irq_handler
5665 * should be NULL.
5666 *
5667 * LOCKING:
5668 * Inherited from calling layer (may sleep).
5669 *
5670 * RETURNS:
5671 * 0 on success, -errno otherwise.
5672 */
5673 int ata_host_activate(struct ata_host *host, int irq,
5674 irq_handler_t irq_handler, unsigned long irq_flags,
5675 struct scsi_host_template *sht)
5676 {
5677 int i, rc;
5678
5679 rc = ata_host_start(host);
5680 if (rc)
5681 return rc;
5682
5683 /* Special case for polling mode */
5684 if (!irq) {
5685 WARN_ON(irq_handler);
5686 return ata_host_register(host, sht);
5687 }
5688
5689 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5690 dev_driver_string(host->dev), host);
5691 if (rc)
5692 return rc;
5693
5694 for (i = 0; i < host->n_ports; i++)
5695 ata_port_desc(host->ports[i], "irq %d", irq);
5696
5697 rc = ata_host_register(host, sht);
5698 /* if failed, just free the IRQ and leave ports alone */
5699 if (rc)
5700 devm_free_irq(host->dev, irq, host);
5701
5702 return rc;
5703 }
5704
5705 /**
5706 * ata_port_detach - Detach ATA port in prepration of device removal
5707 * @ap: ATA port to be detached
5708 *
5709 * Detach all ATA devices and the associated SCSI devices of @ap;
5710 * then, remove the associated SCSI host. @ap is guaranteed to
5711 * be quiescent on return from this function.
5712 *
5713 * LOCKING:
5714 * Kernel thread context (may sleep).
5715 */
5716 static void ata_port_detach(struct ata_port *ap)
5717 {
5718 unsigned long flags;
5719 struct ata_link *link;
5720 struct ata_device *dev;
5721
5722 if (!ap->ops->error_handler)
5723 goto skip_eh;
5724
5725 /* tell EH we're leaving & flush EH */
5726 spin_lock_irqsave(ap->lock, flags);
5727 ap->pflags |= ATA_PFLAG_UNLOADING;
5728 spin_unlock_irqrestore(ap->lock, flags);
5729
5730 ata_port_wait_eh(ap);
5731
5732 /* EH is now guaranteed to see UNLOADING - EH context belongs
5733 * to us. Disable all existing devices.
5734 */
5735 ata_port_for_each_link(link, ap) {
5736 ata_link_for_each_dev(dev, link)
5737 ata_dev_disable(dev);
5738 }
5739
5740 /* Final freeze & EH. All in-flight commands are aborted. EH
5741 * will be skipped and retrials will be terminated with bad
5742 * target.
5743 */
5744 spin_lock_irqsave(ap->lock, flags);
5745 ata_port_freeze(ap); /* won't be thawed */
5746 spin_unlock_irqrestore(ap->lock, flags);
5747
5748 ata_port_wait_eh(ap);
5749 cancel_rearming_delayed_work(&ap->hotplug_task);
5750
5751 skip_eh:
5752 /* remove the associated SCSI host */
5753 scsi_remove_host(ap->scsi_host);
5754 }
5755
5756 /**
5757 * ata_host_detach - Detach all ports of an ATA host
5758 * @host: Host to detach
5759 *
5760 * Detach all ports of @host.
5761 *
5762 * LOCKING:
5763 * Kernel thread context (may sleep).
5764 */
5765 void ata_host_detach(struct ata_host *host)
5766 {
5767 int i;
5768
5769 for (i = 0; i < host->n_ports; i++)
5770 ata_port_detach(host->ports[i]);
5771
5772 /* the host is dead now, dissociate ACPI */
5773 ata_acpi_dissociate(host);
5774 }
5775
5776 #ifdef CONFIG_PCI
5777
5778 /**
5779 * ata_pci_remove_one - PCI layer callback for device removal
5780 * @pdev: PCI device that was removed
5781 *
5782 * PCI layer indicates to libata via this hook that hot-unplug or
5783 * module unload event has occurred. Detach all ports. Resource
5784 * release is handled via devres.
5785 *
5786 * LOCKING:
5787 * Inherited from PCI layer (may sleep).
5788 */
5789 void ata_pci_remove_one(struct pci_dev *pdev)
5790 {
5791 struct device *dev = &pdev->dev;
5792 struct ata_host *host = dev_get_drvdata(dev);
5793
5794 ata_host_detach(host);
5795 }
5796
5797 /* move to PCI subsystem */
5798 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5799 {
5800 unsigned long tmp = 0;
5801
5802 switch (bits->width) {
5803 case 1: {
5804 u8 tmp8 = 0;
5805 pci_read_config_byte(pdev, bits->reg, &tmp8);
5806 tmp = tmp8;
5807 break;
5808 }
5809 case 2: {
5810 u16 tmp16 = 0;
5811 pci_read_config_word(pdev, bits->reg, &tmp16);
5812 tmp = tmp16;
5813 break;
5814 }
5815 case 4: {
5816 u32 tmp32 = 0;
5817 pci_read_config_dword(pdev, bits->reg, &tmp32);
5818 tmp = tmp32;
5819 break;
5820 }
5821
5822 default:
5823 return -EINVAL;
5824 }
5825
5826 tmp &= bits->mask;
5827
5828 return (tmp == bits->val) ? 1 : 0;
5829 }
5830
5831 #ifdef CONFIG_PM
5832 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5833 {
5834 pci_save_state(pdev);
5835 pci_disable_device(pdev);
5836
5837 if (mesg.event & PM_EVENT_SLEEP)
5838 pci_set_power_state(pdev, PCI_D3hot);
5839 }
5840
5841 int ata_pci_device_do_resume(struct pci_dev *pdev)
5842 {
5843 int rc;
5844
5845 pci_set_power_state(pdev, PCI_D0);
5846 pci_restore_state(pdev);
5847
5848 rc = pcim_enable_device(pdev);
5849 if (rc) {
5850 dev_printk(KERN_ERR, &pdev->dev,
5851 "failed to enable device after resume (%d)\n", rc);
5852 return rc;
5853 }
5854
5855 pci_set_master(pdev);
5856 return 0;
5857 }
5858
5859 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5860 {
5861 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5862 int rc = 0;
5863
5864 rc = ata_host_suspend(host, mesg);
5865 if (rc)
5866 return rc;
5867
5868 ata_pci_device_do_suspend(pdev, mesg);
5869
5870 return 0;
5871 }
5872
5873 int ata_pci_device_resume(struct pci_dev *pdev)
5874 {
5875 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5876 int rc;
5877
5878 rc = ata_pci_device_do_resume(pdev);
5879 if (rc == 0)
5880 ata_host_resume(host);
5881 return rc;
5882 }
5883 #endif /* CONFIG_PM */
5884
5885 #endif /* CONFIG_PCI */
5886
5887 static int __init ata_parse_force_one(char **cur,
5888 struct ata_force_ent *force_ent,
5889 const char **reason)
5890 {
5891 /* FIXME: Currently, there's no way to tag init const data and
5892 * using __initdata causes build failure on some versions of
5893 * gcc. Once __initdataconst is implemented, add const to the
5894 * following structure.
5895 */
5896 static struct ata_force_param force_tbl[] __initdata = {
5897 { "40c", .cbl = ATA_CBL_PATA40 },
5898 { "80c", .cbl = ATA_CBL_PATA80 },
5899 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5900 { "unk", .cbl = ATA_CBL_PATA_UNK },
5901 { "ign", .cbl = ATA_CBL_PATA_IGN },
5902 { "sata", .cbl = ATA_CBL_SATA },
5903 { "1.5Gbps", .spd_limit = 1 },
5904 { "3.0Gbps", .spd_limit = 2 },
5905 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5906 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5907 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5908 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5909 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5910 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5911 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5912 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5913 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5914 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5915 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5916 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5917 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5918 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5919 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5920 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5921 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5922 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5923 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5924 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5925 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5926 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5927 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5928 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5929 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5930 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5931 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5932 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5933 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5934 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5935 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5936 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5937 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5938 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5939 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5940 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5941 };
5942 char *start = *cur, *p = *cur;
5943 char *id, *val, *endp;
5944 const struct ata_force_param *match_fp = NULL;
5945 int nr_matches = 0, i;
5946
5947 /* find where this param ends and update *cur */
5948 while (*p != '\0' && *p != ',')
5949 p++;
5950
5951 if (*p == '\0')
5952 *cur = p;
5953 else
5954 *cur = p + 1;
5955
5956 *p = '\0';
5957
5958 /* parse */
5959 p = strchr(start, ':');
5960 if (!p) {
5961 val = strstrip(start);
5962 goto parse_val;
5963 }
5964 *p = '\0';
5965
5966 id = strstrip(start);
5967 val = strstrip(p + 1);
5968
5969 /* parse id */
5970 p = strchr(id, '.');
5971 if (p) {
5972 *p++ = '\0';
5973 force_ent->device = simple_strtoul(p, &endp, 10);
5974 if (p == endp || *endp != '\0') {
5975 *reason = "invalid device";
5976 return -EINVAL;
5977 }
5978 }
5979
5980 force_ent->port = simple_strtoul(id, &endp, 10);
5981 if (p == endp || *endp != '\0') {
5982 *reason = "invalid port/link";
5983 return -EINVAL;
5984 }
5985
5986 parse_val:
5987 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
5988 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
5989 const struct ata_force_param *fp = &force_tbl[i];
5990
5991 if (strncasecmp(val, fp->name, strlen(val)))
5992 continue;
5993
5994 nr_matches++;
5995 match_fp = fp;
5996
5997 if (strcasecmp(val, fp->name) == 0) {
5998 nr_matches = 1;
5999 break;
6000 }
6001 }
6002
6003 if (!nr_matches) {
6004 *reason = "unknown value";
6005 return -EINVAL;
6006 }
6007 if (nr_matches > 1) {
6008 *reason = "ambigious value";
6009 return -EINVAL;
6010 }
6011
6012 force_ent->param = *match_fp;
6013
6014 return 0;
6015 }
6016
6017 static void __init ata_parse_force_param(void)
6018 {
6019 int idx = 0, size = 1;
6020 int last_port = -1, last_device = -1;
6021 char *p, *cur, *next;
6022
6023 /* calculate maximum number of params and allocate force_tbl */
6024 for (p = ata_force_param_buf; *p; p++)
6025 if (*p == ',')
6026 size++;
6027
6028 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6029 if (!ata_force_tbl) {
6030 printk(KERN_WARNING "ata: failed to extend force table, "
6031 "libata.force ignored\n");
6032 return;
6033 }
6034
6035 /* parse and populate the table */
6036 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6037 const char *reason = "";
6038 struct ata_force_ent te = { .port = -1, .device = -1 };
6039
6040 next = cur;
6041 if (ata_parse_force_one(&next, &te, &reason)) {
6042 printk(KERN_WARNING "ata: failed to parse force "
6043 "parameter \"%s\" (%s)\n",
6044 cur, reason);
6045 continue;
6046 }
6047
6048 if (te.port == -1) {
6049 te.port = last_port;
6050 te.device = last_device;
6051 }
6052
6053 ata_force_tbl[idx++] = te;
6054
6055 last_port = te.port;
6056 last_device = te.device;
6057 }
6058
6059 ata_force_tbl_size = idx;
6060 }
6061
6062 static int __init ata_init(void)
6063 {
6064 ata_probe_timeout *= HZ;
6065
6066 ata_parse_force_param();
6067
6068 ata_wq = create_workqueue("ata");
6069 if (!ata_wq)
6070 return -ENOMEM;
6071
6072 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6073 if (!ata_aux_wq) {
6074 destroy_workqueue(ata_wq);
6075 return -ENOMEM;
6076 }
6077
6078 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6079 return 0;
6080 }
6081
6082 static void __exit ata_exit(void)
6083 {
6084 kfree(ata_force_tbl);
6085 destroy_workqueue(ata_wq);
6086 destroy_workqueue(ata_aux_wq);
6087 }
6088
6089 subsys_initcall(ata_init);
6090 module_exit(ata_exit);
6091
6092 static unsigned long ratelimit_time;
6093 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6094
6095 int ata_ratelimit(void)
6096 {
6097 int rc;
6098 unsigned long flags;
6099
6100 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6101
6102 if (time_after(jiffies, ratelimit_time)) {
6103 rc = 1;
6104 ratelimit_time = jiffies + (HZ/5);
6105 } else
6106 rc = 0;
6107
6108 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6109
6110 return rc;
6111 }
6112
6113 /**
6114 * ata_wait_register - wait until register value changes
6115 * @reg: IO-mapped register
6116 * @mask: Mask to apply to read register value
6117 * @val: Wait condition
6118 * @interval_msec: polling interval in milliseconds
6119 * @timeout_msec: timeout in milliseconds
6120 *
6121 * Waiting for some bits of register to change is a common
6122 * operation for ATA controllers. This function reads 32bit LE
6123 * IO-mapped register @reg and tests for the following condition.
6124 *
6125 * (*@reg & mask) != val
6126 *
6127 * If the condition is met, it returns; otherwise, the process is
6128 * repeated after @interval_msec until timeout.
6129 *
6130 * LOCKING:
6131 * Kernel thread context (may sleep)
6132 *
6133 * RETURNS:
6134 * The final register value.
6135 */
6136 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6137 unsigned long interval_msec,
6138 unsigned long timeout_msec)
6139 {
6140 unsigned long timeout;
6141 u32 tmp;
6142
6143 tmp = ioread32(reg);
6144
6145 /* Calculate timeout _after_ the first read to make sure
6146 * preceding writes reach the controller before starting to
6147 * eat away the timeout.
6148 */
6149 timeout = jiffies + (timeout_msec * HZ) / 1000;
6150
6151 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6152 msleep(interval_msec);
6153 tmp = ioread32(reg);
6154 }
6155
6156 return tmp;
6157 }
6158
6159 /*
6160 * Dummy port_ops
6161 */
6162 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6163 {
6164 return AC_ERR_SYSTEM;
6165 }
6166
6167 static void ata_dummy_error_handler(struct ata_port *ap)
6168 {
6169 /* truly dummy */
6170 }
6171
6172 struct ata_port_operations ata_dummy_port_ops = {
6173 .qc_prep = ata_noop_qc_prep,
6174 .qc_issue = ata_dummy_qc_issue,
6175 .error_handler = ata_dummy_error_handler,
6176 };
6177
6178 const struct ata_port_info ata_dummy_port_info = {
6179 .port_ops = &ata_dummy_port_ops,
6180 };
6181
6182 /*
6183 * libata is essentially a library of internal helper functions for
6184 * low-level ATA host controller drivers. As such, the API/ABI is
6185 * likely to change as new drivers are added and updated.
6186 * Do not depend on ABI/API stability.
6187 */
6188 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6189 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6190 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6191 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6192 EXPORT_SYMBOL_GPL(sata_port_ops);
6193 EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
6194 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6195 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6196 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6197 EXPORT_SYMBOL_GPL(ata_host_init);
6198 EXPORT_SYMBOL_GPL(ata_host_alloc);
6199 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6200 EXPORT_SYMBOL_GPL(ata_host_start);
6201 EXPORT_SYMBOL_GPL(ata_host_register);
6202 EXPORT_SYMBOL_GPL(ata_host_activate);
6203 EXPORT_SYMBOL_GPL(ata_host_detach);
6204 EXPORT_SYMBOL_GPL(ata_sg_init);
6205 EXPORT_SYMBOL_GPL(ata_qc_complete);
6206 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6207 EXPORT_SYMBOL_GPL(sata_print_link_status);
6208 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6209 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6210 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6211 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6212 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6213 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6214 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6215 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6216 EXPORT_SYMBOL_GPL(ata_mode_string);
6217 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6218 EXPORT_SYMBOL_GPL(ata_port_start);
6219 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6220 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6221 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6222 EXPORT_SYMBOL_GPL(ata_port_probe);
6223 EXPORT_SYMBOL_GPL(ata_dev_disable);
6224 EXPORT_SYMBOL_GPL(sata_set_spd);
6225 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6226 EXPORT_SYMBOL_GPL(sata_link_debounce);
6227 EXPORT_SYMBOL_GPL(sata_link_resume);
6228 EXPORT_SYMBOL_GPL(ata_std_prereset);
6229 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6230 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6231 EXPORT_SYMBOL_GPL(ata_std_postreset);
6232 EXPORT_SYMBOL_GPL(ata_dev_classify);
6233 EXPORT_SYMBOL_GPL(ata_dev_pair);
6234 EXPORT_SYMBOL_GPL(ata_port_disable);
6235 EXPORT_SYMBOL_GPL(ata_ratelimit);
6236 EXPORT_SYMBOL_GPL(ata_wait_register);
6237 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6238 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6239 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6240 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6241 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6242 EXPORT_SYMBOL_GPL(sata_scr_valid);
6243 EXPORT_SYMBOL_GPL(sata_scr_read);
6244 EXPORT_SYMBOL_GPL(sata_scr_write);
6245 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6246 EXPORT_SYMBOL_GPL(ata_link_online);
6247 EXPORT_SYMBOL_GPL(ata_link_offline);
6248 #ifdef CONFIG_PM
6249 EXPORT_SYMBOL_GPL(ata_host_suspend);
6250 EXPORT_SYMBOL_GPL(ata_host_resume);
6251 #endif /* CONFIG_PM */
6252 EXPORT_SYMBOL_GPL(ata_id_string);
6253 EXPORT_SYMBOL_GPL(ata_id_c_string);
6254 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6255
6256 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6257 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6258 EXPORT_SYMBOL_GPL(ata_timing_compute);
6259 EXPORT_SYMBOL_GPL(ata_timing_merge);
6260 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6261
6262 #ifdef CONFIG_PCI
6263 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6264 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6265 #ifdef CONFIG_PM
6266 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6267 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6268 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6269 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6270 #endif /* CONFIG_PM */
6271 #endif /* CONFIG_PCI */
6272
6273 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
6274 EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
6275
6276 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6277 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6278 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6279 EXPORT_SYMBOL_GPL(ata_port_desc);
6280 #ifdef CONFIG_PCI
6281 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6282 #endif /* CONFIG_PCI */
6283 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6284 EXPORT_SYMBOL_GPL(ata_link_abort);
6285 EXPORT_SYMBOL_GPL(ata_port_abort);
6286 EXPORT_SYMBOL_GPL(ata_port_freeze);
6287 EXPORT_SYMBOL_GPL(sata_async_notification);
6288 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6289 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6290 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6291 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6292 EXPORT_SYMBOL_GPL(ata_do_eh);
6293 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6294
6295 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6296 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6297 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6298 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6299 EXPORT_SYMBOL_GPL(ata_cable_sata);
This page took 0.169163 seconds and 6 git commands to generate.