libata: unify mechanism to request follow-up SRST
drivers/ata/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/jiffies.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <scsi/scsi.h>
61 #include <scsi/scsi_cmnd.h>
62 #include <scsi/scsi_host.h>
63 #include <linux/libata.h>
64 #include <asm/semaphore.h>
65 #include <asm/byteorder.h>
66 #include <linux/cdrom.h>
67
68 #include "libata.h"
69
70
71 /* debounce timing parameters in msecs { interval, duration, timeout } */
72 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
73 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
74 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
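/*
 * Usage note (a sketch, not part of this hunk): reset paths hand one of
 * the tables above to the SATA PHY debounce helpers, e.g.
 *
 *	sata_link_resume(link, sata_deb_timing_normal, deadline);
 *
 * "interval" is how often SStatus is sampled, "duration" is how long the
 * reading must stay stable, and "timeout" bounds the whole wait.
 */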
75
76 const struct ata_port_operations ata_base_port_ops = {
77 .prereset = ata_std_prereset,
78 .postreset = ata_std_postreset,
79 .error_handler = ata_std_error_handler,
80 };
81
82 const struct ata_port_operations sata_port_ops = {
83 .inherits = &ata_base_port_ops,
84
85 .qc_defer = ata_std_qc_defer,
86 .hardreset = sata_std_hardreset,
87 .sff_dev_select = ata_noop_dev_select,
88 };
89
90 const struct ata_port_operations sata_pmp_port_ops = {
91 .inherits = &sata_port_ops,
92
93 .pmp_prereset = ata_std_prereset,
94 .pmp_hardreset = sata_std_hardreset,
95 .pmp_postreset = ata_std_postreset,
96 .error_handler = sata_pmp_error_handler,
97 };
98
99 static unsigned int ata_dev_init_params(struct ata_device *dev,
100 u16 heads, u16 sectors);
101 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
102 static unsigned int ata_dev_set_feature(struct ata_device *dev,
103 u8 enable, u8 feature);
104 static void ata_dev_xfermask(struct ata_device *dev);
105 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
106
107 unsigned int ata_print_id = 1;
108 static struct workqueue_struct *ata_wq;
109
110 struct workqueue_struct *ata_aux_wq;
111
112 struct ata_force_param {
113 const char *name;
114 unsigned int cbl;
115 int spd_limit;
116 unsigned long xfer_mask;
117 unsigned int horkage_on;
118 unsigned int horkage_off;
119 };
120
121 struct ata_force_ent {
122 int port;
123 int device;
124 struct ata_force_param param;
125 };
126
127 static struct ata_force_ent *ata_force_tbl;
128 static int ata_force_tbl_size;
129
130 static char ata_force_param_buf[PAGE_SIZE] __initdata;
131 /* param_buf is thrown away after initialization, disallow read */
132 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
133 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
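/*
 * Illustrative example: booting with "libata.force=1.00:40c,udma4" marks
 * device 1.00 as being on a 40-wire cable and caps its transfer mode at
 * UDMA/66; the ata_force_*() helpers below show how the parsed table is
 * applied.
 */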
134
135 int atapi_enabled = 1;
136 module_param(atapi_enabled, int, 0444);
137 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
138
139 static int atapi_dmadir = 0;
140 module_param(atapi_dmadir, int, 0444);
141 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
142
143 int atapi_passthru16 = 1;
144 module_param(atapi_passthru16, int, 0444);
145 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
146
147 int libata_fua = 0;
148 module_param_named(fua, libata_fua, int, 0444);
149 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
150
151 static int ata_ignore_hpa;
152 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
153 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
154
155 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
156 module_param_named(dma, libata_dma_mask, int, 0444);
157 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
158
159 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
160 module_param(ata_probe_timeout, int, 0444);
161 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
162
163 int libata_noacpi = 0;
164 module_param_named(noacpi, libata_noacpi, int, 0444);
165 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
166
167 int libata_allow_tpm = 0;
168 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
169 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
170
171 MODULE_AUTHOR("Jeff Garzik");
172 MODULE_DESCRIPTION("Library module for ATA devices");
173 MODULE_LICENSE("GPL");
174 MODULE_VERSION(DRV_VERSION);
175
176
177 /**
178 * ata_force_cbl - force cable type according to libata.force
179 * @ap: ATA port of interest
180 *
181 * Force cable type according to libata.force and whine about it.
182 * The last entry which has matching port number is used, so it
183 * can be specified as part of device force parameters. For
184 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
185 * same effect.
186 *
187 * LOCKING:
188 * EH context.
189 */
190 void ata_force_cbl(struct ata_port *ap)
191 {
192 int i;
193
194 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
195 const struct ata_force_ent *fe = &ata_force_tbl[i];
196
197 if (fe->port != -1 && fe->port != ap->print_id)
198 continue;
199
200 if (fe->param.cbl == ATA_CBL_NONE)
201 continue;
202
203 ap->cbl = fe->param.cbl;
204 ata_port_printk(ap, KERN_NOTICE,
205 "FORCE: cable set to %s\n", fe->param.name);
206 return;
207 }
208 }
209
210 /**
211 * ata_force_spd_limit - force SATA spd limit according to libata.force
212 * @link: ATA link of interest
213 *
214 * Force SATA spd limit according to libata.force and whine about
215 * it. When only the port part is specified (e.g. 1:), the limit
216 * applies to all links connected to both the host link and all
217 * fan-out ports connected via PMP. If the device part is
218 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
219 * link not the host link. Device number 15 always points to the
220 * host link whether PMP is attached or not.
221 *
222 * LOCKING:
223 * EH context.
224 */
225 static void ata_force_spd_limit(struct ata_link *link)
226 {
227 int linkno, i;
228
229 if (ata_is_host_link(link))
230 linkno = 15;
231 else
232 linkno = link->pmp;
233
234 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
235 const struct ata_force_ent *fe = &ata_force_tbl[i];
236
237 if (fe->port != -1 && fe->port != link->ap->print_id)
238 continue;
239
240 if (fe->device != -1 && fe->device != linkno)
241 continue;
242
243 if (!fe->param.spd_limit)
244 continue;
245
246 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
247 ata_link_printk(link, KERN_NOTICE,
248 "FORCE: PHY spd limit set to %s\n", fe->param.name);
249 return;
250 }
251 }
252
253 /**
254 * ata_force_xfermask - force xfermask according to libata.force
255 * @dev: ATA device of interest
256 *
257 * Force xfer_mask according to libata.force and whine about it.
258 * For consistency with link selection, device number 15 selects
259 * the first device connected to the host link.
260 *
261 * LOCKING:
262 * EH context.
263 */
264 static void ata_force_xfermask(struct ata_device *dev)
265 {
266 int devno = dev->link->pmp + dev->devno;
267 int alt_devno = devno;
268 int i;
269
270 /* allow n.15 for the first device attached to host port */
271 if (ata_is_host_link(dev->link) && devno == 0)
272 alt_devno = 15;
273
274 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
275 const struct ata_force_ent *fe = &ata_force_tbl[i];
276 unsigned long pio_mask, mwdma_mask, udma_mask;
277
278 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
279 continue;
280
281 if (fe->device != -1 && fe->device != devno &&
282 fe->device != alt_devno)
283 continue;
284
285 if (!fe->param.xfer_mask)
286 continue;
287
288 ata_unpack_xfermask(fe->param.xfer_mask,
289 &pio_mask, &mwdma_mask, &udma_mask);
290 if (udma_mask)
291 dev->udma_mask = udma_mask;
292 else if (mwdma_mask) {
293 dev->udma_mask = 0;
294 dev->mwdma_mask = mwdma_mask;
295 } else {
296 dev->udma_mask = 0;
297 dev->mwdma_mask = 0;
298 dev->pio_mask = pio_mask;
299 }
300
301 ata_dev_printk(dev, KERN_NOTICE,
302 "FORCE: xfer_mask set to %s\n", fe->param.name);
303 return;
304 }
305 }
306
307 /**
308 * ata_force_horkage - force horkage according to libata.force
309 * @dev: ATA device of interest
310 *
311 * Force horkage according to libata.force and whine about it.
312 * For consistency with link selection, device number 15 selects
313 * the first device connected to the host link.
314 *
315 * LOCKING:
316 * EH context.
317 */
318 static void ata_force_horkage(struct ata_device *dev)
319 {
320 int devno = dev->link->pmp + dev->devno;
321 int alt_devno = devno;
322 int i;
323
324 /* allow n.15 for the first device attached to host port */
325 if (ata_is_host_link(dev->link) && devno == 0)
326 alt_devno = 15;
327
328 for (i = 0; i < ata_force_tbl_size; i++) {
329 const struct ata_force_ent *fe = &ata_force_tbl[i];
330
331 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
332 continue;
333
334 if (fe->device != -1 && fe->device != devno &&
335 fe->device != alt_devno)
336 continue;
337
338 if (!(~dev->horkage & fe->param.horkage_on) &&
339 !(dev->horkage & fe->param.horkage_off))
340 continue;
341
342 dev->horkage |= fe->param.horkage_on;
343 dev->horkage &= ~fe->param.horkage_off;
344
345 ata_dev_printk(dev, KERN_NOTICE,
346 "FORCE: horkage modified (%s)\n", fe->param.name);
347 }
348 }
349
350 /**
351 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
352 * @opcode: SCSI opcode
353 *
354 * Determine ATAPI command type from @opcode.
355 *
356 * LOCKING:
357 * None.
358 *
359 * RETURNS:
360 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
361 */
362 int atapi_cmd_type(u8 opcode)
363 {
364 switch (opcode) {
365 case GPCMD_READ_10:
366 case GPCMD_READ_12:
367 return ATAPI_READ;
368
369 case GPCMD_WRITE_10:
370 case GPCMD_WRITE_12:
371 case GPCMD_WRITE_AND_VERIFY_10:
372 return ATAPI_WRITE;
373
374 case GPCMD_READ_CD:
375 case GPCMD_READ_CD_MSF:
376 return ATAPI_READ_CD;
377
378 case ATA_16:
379 case ATA_12:
380 if (atapi_passthru16)
381 return ATAPI_PASS_THRU;
382 /* fall thru */
383 default:
384 return ATAPI_MISC;
385 }
386 }
387
388 /**
389 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
390 * @tf: Taskfile to convert
391 * @pmp: Port multiplier port
392 * @is_cmd: This FIS is for command
393 * @fis: Buffer into which data will output
394 *
395 * Converts a standard ATA taskfile to a Serial ATA
396 * FIS structure (Register - Host to Device).
397 *
398 * LOCKING:
399 * Inherited from caller.
400 */
401 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
402 {
403 fis[0] = 0x27; /* Register - Host to Device FIS */
404 fis[1] = pmp & 0xf; /* Port multiplier number*/
405 if (is_cmd)
406 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
407
408 fis[2] = tf->command;
409 fis[3] = tf->feature;
410
411 fis[4] = tf->lbal;
412 fis[5] = tf->lbam;
413 fis[6] = tf->lbah;
414 fis[7] = tf->device;
415
416 fis[8] = tf->hob_lbal;
417 fis[9] = tf->hob_lbam;
418 fis[10] = tf->hob_lbah;
419 fis[11] = tf->hob_feature;
420
421 fis[12] = tf->nsect;
422 fis[13] = tf->hob_nsect;
423 fis[14] = 0;
424 fis[15] = tf->ctl;
425
426 fis[16] = 0;
427 fis[17] = 0;
428 fis[18] = 0;
429 fis[19] = 0;
430 }
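/*
 * Illustrative use (sketch): a SATA LLDD typically builds the command FIS
 * for a queued command straight into its command table, e.g.
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * which fills all 20 bytes of the Register - Host to Device FIS laid out
 * above.
 */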
431
432 /**
433 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
434 * @fis: Buffer from which data will be input
435 * @tf: Taskfile to output
436 *
437 * Converts a serial ATA FIS structure to a standard ATA taskfile.
438 *
439 * LOCKING:
440 * Inherited from caller.
441 */
442
443 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
444 {
445 tf->command = fis[2]; /* status */
446 tf->feature = fis[3]; /* error */
447
448 tf->lbal = fis[4];
449 tf->lbam = fis[5];
450 tf->lbah = fis[6];
451 tf->device = fis[7];
452
453 tf->hob_lbal = fis[8];
454 tf->hob_lbam = fis[9];
455 tf->hob_lbah = fis[10];
456
457 tf->nsect = fis[12];
458 tf->hob_nsect = fis[13];
459 }
460
461 static const u8 ata_rw_cmds[] = {
462 /* pio multi */
463 ATA_CMD_READ_MULTI,
464 ATA_CMD_WRITE_MULTI,
465 ATA_CMD_READ_MULTI_EXT,
466 ATA_CMD_WRITE_MULTI_EXT,
467 0,
468 0,
469 0,
470 ATA_CMD_WRITE_MULTI_FUA_EXT,
471 /* pio */
472 ATA_CMD_PIO_READ,
473 ATA_CMD_PIO_WRITE,
474 ATA_CMD_PIO_READ_EXT,
475 ATA_CMD_PIO_WRITE_EXT,
476 0,
477 0,
478 0,
479 0,
480 /* dma */
481 ATA_CMD_READ,
482 ATA_CMD_WRITE,
483 ATA_CMD_READ_EXT,
484 ATA_CMD_WRITE_EXT,
485 0,
486 0,
487 0,
488 ATA_CMD_WRITE_FUA_EXT
489 };
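/*
 * The table above is indexed as base + fua + lba48 + write, where base is
 * 0 (PIO multi), 8 (PIO) or 16 (DMA), fua is 0 or 4, lba48 is 0 or 2 and
 * write is 0 or 1; see ata_rwcmd_protocol() below.  Zero entries mark
 * flag combinations for which no ATA opcode exists (e.g. FUA without
 * LBA48).
 */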
490
491 /**
492 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
493 * @tf: command to examine and configure
494 * @dev: device tf belongs to
495 *
496 * Examine the device configuration and tf->flags to calculate
497 * the proper read/write commands and protocol to use.
498 *
499 * LOCKING:
500 * caller.
501 */
502 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
503 {
504 u8 cmd;
505
506 int index, fua, lba48, write;
507
508 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
509 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
510 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
511
512 if (dev->flags & ATA_DFLAG_PIO) {
513 tf->protocol = ATA_PROT_PIO;
514 index = dev->multi_count ? 0 : 8;
515 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
516 /* Unable to use DMA due to host limitation */
517 tf->protocol = ATA_PROT_PIO;
518 index = dev->multi_count ? 0 : 8;
519 } else {
520 tf->protocol = ATA_PROT_DMA;
521 index = 16;
522 }
523
524 cmd = ata_rw_cmds[index + fua + lba48 + write];
525 if (cmd) {
526 tf->command = cmd;
527 return 0;
528 }
529 return -1;
530 }
531
532 /**
533 * ata_tf_read_block - Read block address from ATA taskfile
534 * @tf: ATA taskfile of interest
535 * @dev: ATA device @tf belongs to
536 *
537 * LOCKING:
538 * None.
539 *
540 * Read block address from @tf. This function can handle all
541 * three address formats - LBA, LBA48 and CHS. tf->protocol and
542 * flags select the address format to use.
543 *
544 * RETURNS:
545 * Block address read from @tf.
546 */
547 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
548 {
549 u64 block = 0;
550
551 if (tf->flags & ATA_TFLAG_LBA) {
552 if (tf->flags & ATA_TFLAG_LBA48) {
553 block |= (u64)tf->hob_lbah << 40;
554 block |= (u64)tf->hob_lbam << 32;
555 block |= tf->hob_lbal << 24;
556 } else
557 block |= (tf->device & 0xf) << 24;
558
559 block |= tf->lbah << 16;
560 block |= tf->lbam << 8;
561 block |= tf->lbal;
562 } else {
563 u32 cyl, head, sect;
564
565 cyl = tf->lbam | (tf->lbah << 8);
566 head = tf->device & 0xf;
567 sect = tf->lbal;
568
569 block = (cyl * dev->heads + head) * dev->sectors + sect;
570 }
571
572 return block;
573 }
574
575 /**
576 * ata_build_rw_tf - Build ATA taskfile for given read/write request
577 * @tf: Target ATA taskfile
578 * @dev: ATA device @tf belongs to
579 * @block: Block address
580 * @n_block: Number of blocks
581 * @tf_flags: RW/FUA etc...
582 * @tag: tag
583 *
584 * LOCKING:
585 * None.
586 *
587 * Build ATA taskfile @tf for read/write request described by
588 * @block, @n_block, @tf_flags and @tag on @dev.
589 *
590 * RETURNS:
591 *
592 * 0 on success, -ERANGE if the request is too large for @dev,
593 * -EINVAL if the request is invalid.
594 */
595 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
596 u64 block, u32 n_block, unsigned int tf_flags,
597 unsigned int tag)
598 {
599 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
600 tf->flags |= tf_flags;
601
602 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
603 /* yay, NCQ */
604 if (!lba_48_ok(block, n_block))
605 return -ERANGE;
606
607 tf->protocol = ATA_PROT_NCQ;
608 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
609
610 if (tf->flags & ATA_TFLAG_WRITE)
611 tf->command = ATA_CMD_FPDMA_WRITE;
612 else
613 tf->command = ATA_CMD_FPDMA_READ;
614
615 tf->nsect = tag << 3;
616 tf->hob_feature = (n_block >> 8) & 0xff;
617 tf->feature = n_block & 0xff;
618
619 tf->hob_lbah = (block >> 40) & 0xff;
620 tf->hob_lbam = (block >> 32) & 0xff;
621 tf->hob_lbal = (block >> 24) & 0xff;
622 tf->lbah = (block >> 16) & 0xff;
623 tf->lbam = (block >> 8) & 0xff;
624 tf->lbal = block & 0xff;
625
626 tf->device = 1 << 6;
627 if (tf->flags & ATA_TFLAG_FUA)
628 tf->device |= 1 << 7;
629 } else if (dev->flags & ATA_DFLAG_LBA) {
630 tf->flags |= ATA_TFLAG_LBA;
631
632 if (lba_28_ok(block, n_block)) {
633 /* use LBA28 */
634 tf->device |= (block >> 24) & 0xf;
635 } else if (lba_48_ok(block, n_block)) {
636 if (!(dev->flags & ATA_DFLAG_LBA48))
637 return -ERANGE;
638
639 /* use LBA48 */
640 tf->flags |= ATA_TFLAG_LBA48;
641
642 tf->hob_nsect = (n_block >> 8) & 0xff;
643
644 tf->hob_lbah = (block >> 40) & 0xff;
645 tf->hob_lbam = (block >> 32) & 0xff;
646 tf->hob_lbal = (block >> 24) & 0xff;
647 } else
648 /* request too large even for LBA48 */
649 return -ERANGE;
650
651 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
652 return -EINVAL;
653
654 tf->nsect = n_block & 0xff;
655
656 tf->lbah = (block >> 16) & 0xff;
657 tf->lbam = (block >> 8) & 0xff;
658 tf->lbal = block & 0xff;
659
660 tf->device |= ATA_LBA;
661 } else {
662 /* CHS */
663 u32 sect, head, cyl, track;
664
665 /* The request -may- be too large for CHS addressing. */
666 if (!lba_28_ok(block, n_block))
667 return -ERANGE;
668
669 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
670 return -EINVAL;
671
672 /* Convert LBA to CHS */
673 track = (u32)block / dev->sectors;
674 cyl = track / dev->heads;
675 head = track % dev->heads;
676 sect = (u32)block % dev->sectors + 1;
677
678 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
679 (u32)block, track, cyl, head, sect);
680
681 /* Check whether the converted CHS can fit.
682 Cylinder: 0-65535
683 Head: 0-15
684 Sector: 1-255*/
685 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
686 return -ERANGE;
687
688 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
689 tf->lbal = sect;
690 tf->lbam = cyl;
691 tf->lbah = cyl >> 8;
692 tf->device |= head;
693 }
694
695 return 0;
696 }
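/*
 * Worked example for the CHS branch above (illustrative geometry): with
 * dev->heads == 16 and dev->sectors == 63, block 4000 converts to
 * track = 4000 / 63 = 63, cyl = 63 / 16 = 3, head = 63 % 16 = 15 and
 * sect = 4000 % 63 + 1 = 32, which fits the cylinder/head/sector limits
 * checked above.
 */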
697
698 /**
699 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
700 * @pio_mask: pio_mask
701 * @mwdma_mask: mwdma_mask
702 * @udma_mask: udma_mask
703 *
704 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
705 * unsigned int xfer_mask.
706 *
707 * LOCKING:
708 * None.
709 *
710 * RETURNS:
711 * Packed xfer_mask.
712 */
713 unsigned long ata_pack_xfermask(unsigned long pio_mask,
714 unsigned long mwdma_mask,
715 unsigned long udma_mask)
716 {
717 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
718 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
719 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
720 }
721
722 /**
723 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
724 * @xfer_mask: xfer_mask to unpack
725 * @pio_mask: resulting pio_mask
726 * @mwdma_mask: resulting mwdma_mask
727 * @udma_mask: resulting udma_mask
728 *
729 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
730 * Any NULL destination masks will be ignored.
731 */
732 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
733 unsigned long *mwdma_mask, unsigned long *udma_mask)
734 {
735 if (pio_mask)
736 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
737 if (mwdma_mask)
738 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
739 if (udma_mask)
740 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
741 }
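/*
 * The two helpers above are inverses of each other.  A minimal sketch
 * (values illustrative):
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long xfer = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 *	ata_unpack_xfermask(xfer, &pio, &mwdma, &udma);
 *
 * leaves pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and udma == 0x3f
 * (UDMA0-5) again.
 */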
742
743 static const struct ata_xfer_ent {
744 int shift, bits;
745 u8 base;
746 } ata_xfer_tbl[] = {
747 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
748 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
749 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
750 { -1, },
751 };
752
753 /**
754 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
755 * @xfer_mask: xfer_mask of interest
756 *
757 * Return matching XFER_* value for @xfer_mask. Only the highest
758 * bit of @xfer_mask is considered.
759 *
760 * LOCKING:
761 * None.
762 *
763 * RETURNS:
764 * Matching XFER_* value, 0xff if no match found.
765 */
766 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
767 {
768 int highbit = fls(xfer_mask) - 1;
769 const struct ata_xfer_ent *ent;
770
771 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
772 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
773 return ent->base + highbit - ent->shift;
774 return 0xff;
775 }
776
777 /**
778 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
779 * @xfer_mode: XFER_* of interest
780 *
781 * Return matching xfer_mask for @xfer_mode.
782 *
783 * LOCKING:
784 * None.
785 *
786 * RETURNS:
787 * Matching xfer_mask, 0 if no match found.
788 */
789 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
790 {
791 const struct ata_xfer_ent *ent;
792
793 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
794 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
795 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
796 & ~((1 << ent->shift) - 1);
797 return 0;
798 }
799
800 /**
801 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
802 * @xfer_mode: XFER_* of interest
803 *
804 * Return matching xfer_shift for @xfer_mode.
805 *
806 * LOCKING:
807 * None.
808 *
809 * RETURNS:
810 * Matching xfer_shift, -1 if no match found.
811 */
812 int ata_xfer_mode2shift(unsigned long xfer_mode)
813 {
814 const struct ata_xfer_ent *ent;
815
816 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
817 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
818 return ent->shift;
819 return -1;
820 }
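/*
 * Example of the three conversions above (illustrative): for a mask whose
 * highest set bit is UDMA5, ata_xfer_mask2mode(0x3f << ATA_SHIFT_UDMA)
 * returns XFER_UDMA_5, ata_xfer_mode2mask(XFER_UDMA_5) gives back the full
 * 0x3f << ATA_SHIFT_UDMA mask (UDMA0-5), and
 * ata_xfer_mode2shift(XFER_UDMA_5) returns ATA_SHIFT_UDMA.
 */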
821
822 /**
823 * ata_mode_string - convert xfer_mask to string
824 * @xfer_mask: mask of bits supported; only highest bit counts.
825 *
826 * Determine string which represents the highest speed
827 * (highest bit in @xfer_mask).
828 *
829 * LOCKING:
830 * None.
831 *
832 * RETURNS:
833 * Constant C string representing highest speed listed in
834 * @xfer_mask, or the constant C string "<n/a>".
835 */
836 const char *ata_mode_string(unsigned long xfer_mask)
837 {
838 static const char * const xfer_mode_str[] = {
839 "PIO0",
840 "PIO1",
841 "PIO2",
842 "PIO3",
843 "PIO4",
844 "PIO5",
845 "PIO6",
846 "MWDMA0",
847 "MWDMA1",
848 "MWDMA2",
849 "MWDMA3",
850 "MWDMA4",
851 "UDMA/16",
852 "UDMA/25",
853 "UDMA/33",
854 "UDMA/44",
855 "UDMA/66",
856 "UDMA/100",
857 "UDMA/133",
858 "UDMA7",
859 };
860 int highbit;
861
862 highbit = fls(xfer_mask) - 1;
863 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
864 return xfer_mode_str[highbit];
865 return "<n/a>";
866 }
867
868 static const char *sata_spd_string(unsigned int spd)
869 {
870 static const char * const spd_str[] = {
871 "1.5 Gbps",
872 "3.0 Gbps",
873 };
874
875 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
876 return "<unknown>";
877 return spd_str[spd - 1];
878 }
879
880 void ata_dev_disable(struct ata_device *dev)
881 {
882 if (ata_dev_enabled(dev)) {
883 if (ata_msg_drv(dev->link->ap))
884 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
885 ata_acpi_on_disable(dev);
886 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
887 ATA_DNXFER_QUIET);
888 dev->class++;
889 }
890 }
891
892 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
893 {
894 struct ata_link *link = dev->link;
895 struct ata_port *ap = link->ap;
896 u32 scontrol;
897 unsigned int err_mask;
898 int rc;
899
900 /*
901 * disallow DIPM for drivers which haven't set
902 * ATA_FLAG_IPM. This is because when DIPM is enabled,
903 * phy ready will be set in the interrupt status on
904 * state changes, which will cause some drivers to
905 * think there are errors - additionally drivers will
906 * need to disable hot plug.
907 */
908 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
909 ap->pm_policy = NOT_AVAILABLE;
910 return -EINVAL;
911 }
912
913 /*
914 * For DIPM, we will only enable it for the
915 * min_power setting.
916 *
917 * Why? Because disks are too stupid to know that,
918 * if the host rejects a request to go to SLUMBER,
919 * they should retry at PARTIAL; instead they
920 * just give up. So, for medium_power to
921 * work at all, we need to only allow HIPM.
922 */
923 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
924 if (rc)
925 return rc;
926
927 switch (policy) {
928 case MIN_POWER:
929 /* no restrictions on IPM transitions */
930 scontrol &= ~(0x3 << 8);
931 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
932 if (rc)
933 return rc;
934
935 /* enable DIPM */
936 if (dev->flags & ATA_DFLAG_DIPM)
937 err_mask = ata_dev_set_feature(dev,
938 SETFEATURES_SATA_ENABLE, SATA_DIPM);
939 break;
940 case MEDIUM_POWER:
941 /* allow IPM to PARTIAL */
942 scontrol &= ~(0x1 << 8);
943 scontrol |= (0x2 << 8);
944 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
945 if (rc)
946 return rc;
947
948 /*
949 * we don't have to disable DIPM since IPM flags
950 * disallow transitions to SLUMBER, which effectively
951 * disables DIPM if the device does not support PARTIAL
952 */
953 break;
954 case NOT_AVAILABLE:
955 case MAX_PERFORMANCE:
956 /* disable all IPM transitions */
957 scontrol |= (0x3 << 8);
958 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
959 if (rc)
960 return rc;
961
962 /*
963 * we don't have to disable DIPM since IPM flags
964 * disallow all transitions which effectively
965 * disables DIPM anyway.
966 */
967 break;
968 }
969
970 /* FIXME: handle SET FEATURES failure */
971 (void) err_mask;
972
973 return 0;
974 }
975
976 /**
977 * ata_dev_enable_pm - enable SATA interface power management
978 * @dev: device to enable power management
979 * @policy: the link power management policy
980 *
981 * Enable SATA Interface power management. This will enable
982 * Device Interface Power Management (DIPM) for min_power
983 * policy, and then call driver specific callbacks for
984 * enabling Host Initiated Power management.
985 *
986 * Locking: Caller.
987 * Returns: void; on failure, @ap->pm_policy is reset to MAX_PERFORMANCE.
988 */
989 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
990 {
991 int rc = 0;
992 struct ata_port *ap = dev->link->ap;
993
994 /* set HIPM first, then DIPM */
995 if (ap->ops->enable_pm)
996 rc = ap->ops->enable_pm(ap, policy);
997 if (rc)
998 goto enable_pm_out;
999 rc = ata_dev_set_dipm(dev, policy);
1000
1001 enable_pm_out:
1002 if (rc)
1003 ap->pm_policy = MAX_PERFORMANCE;
1004 else
1005 ap->pm_policy = policy;
1006 return /* rc */; /* hopefully we can use 'rc' eventually */
1007 }
1008
1009 #ifdef CONFIG_PM
1010 /**
1011 * ata_dev_disable_pm - disable SATA interface power management
1012 * @dev: device to disable power management
1013 *
1014 * Disable SATA Interface power management. This will disable
1015 * Device Interface Power Management (DIPM) without changing
1016 * policy, call driver specific callbacks for disabling Host
1017 * Initiated Power management.
1018 *
1019 * Locking: Caller.
1020 * Returns: void
1021 */
1022 static void ata_dev_disable_pm(struct ata_device *dev)
1023 {
1024 struct ata_port *ap = dev->link->ap;
1025
1026 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1027 if (ap->ops->disable_pm)
1028 ap->ops->disable_pm(ap);
1029 }
1030 #endif /* CONFIG_PM */
1031
1032 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1033 {
1034 ap->pm_policy = policy;
1035 ap->link.eh_info.action |= ATA_EH_LPM;
1036 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1037 ata_port_schedule_eh(ap);
1038 }
1039
1040 #ifdef CONFIG_PM
1041 static void ata_lpm_enable(struct ata_host *host)
1042 {
1043 struct ata_link *link;
1044 struct ata_port *ap;
1045 struct ata_device *dev;
1046 int i;
1047
1048 for (i = 0; i < host->n_ports; i++) {
1049 ap = host->ports[i];
1050 ata_port_for_each_link(link, ap) {
1051 ata_link_for_each_dev(dev, link)
1052 ata_dev_disable_pm(dev);
1053 }
1054 }
1055 }
1056
1057 static void ata_lpm_disable(struct ata_host *host)
1058 {
1059 int i;
1060
1061 for (i = 0; i < host->n_ports; i++) {
1062 struct ata_port *ap = host->ports[i];
1063 ata_lpm_schedule(ap, ap->pm_policy);
1064 }
1065 }
1066 #endif /* CONFIG_PM */
1067
1068 /**
1069 * ata_dev_classify - determine device type based on ATA-spec signature
1070 * @tf: ATA taskfile register set for device to be identified
1071 *
1072 * Determine from taskfile register contents whether a device is
1073 * ATA or ATAPI, as per "Signature and persistence" section
1074 * of ATA/PI spec (volume 1, sect 5.14).
1075 *
1076 * LOCKING:
1077 * None.
1078 *
1079 * RETURNS:
1080 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1081 * %ATA_DEV_UNKNOWN in the event of failure.
1082 */
1083 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1084 {
1085 /* Apple's open source Darwin code hints that some devices only
1086 * put a proper signature into the LBA mid/high registers,
1087 * so we check only those. It's sufficient for uniqueness.
1088 *
1089 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1090 * signatures for ATA and ATAPI devices attached on SerialATA,
1091 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1092 * spec has never mentioned using different signatures
1093 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1094 * Multiplier specification began to use 0x69/0x96 to identify
1095 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1096 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1097 * 0x69/0x96 and described them as reserved for
1098 * SerialATA.
1099 *
1100 * We follow the current spec and consider that 0x69/0x96
1101 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1102 */
1103 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1104 DPRINTK("found ATA device by sig\n");
1105 return ATA_DEV_ATA;
1106 }
1107
1108 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1109 DPRINTK("found ATAPI device by sig\n");
1110 return ATA_DEV_ATAPI;
1111 }
1112
1113 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1114 DPRINTK("found PMP device by sig\n");
1115 return ATA_DEV_PMP;
1116 }
1117
1118 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1119 printk(KERN_INFO "ata: SEMB device ignored\n");
1120 return ATA_DEV_SEMB_UNSUP; /* not yet */
1121 }
1122
1123 DPRINTK("unknown device\n");
1124 return ATA_DEV_UNKNOWN;
1125 }
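/*
 * Quick reference for the signature checks above (LBA mid / LBA high):
 *
 *	0x00/0x00  ATA		0x14/0xeb  ATAPI
 *	0x69/0x96  PMP		0x3c/0xc3  SEMB (ignored for now)
 */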
1126
1127 /**
1128 * ata_id_string - Convert IDENTIFY DEVICE page into string
1129 * @id: IDENTIFY DEVICE results we will examine
1130 * @s: string into which data is output
1131 * @ofs: offset into identify device page
1132 * @len: length of string to return. must be an even number.
1133 *
1134 * The strings in the IDENTIFY DEVICE page are broken up into
1135 * 16-bit chunks. Run through the string, and output each
1136 * 8-bit chunk linearly, regardless of platform.
1137 *
1138 * LOCKING:
1139 * caller.
1140 */
1141
1142 void ata_id_string(const u16 *id, unsigned char *s,
1143 unsigned int ofs, unsigned int len)
1144 {
1145 unsigned int c;
1146
1147 while (len > 0) {
1148 c = id[ofs] >> 8;
1149 *s = c;
1150 s++;
1151
1152 c = id[ofs] & 0xff;
1153 *s = c;
1154 s++;
1155
1156 ofs++;
1157 len -= 2;
1158 }
1159 }
1160
1161 /**
1162 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1163 * @id: IDENTIFY DEVICE results we will examine
1164 * @s: string into which data is output
1165 * @ofs: offset into identify device page
1166 * @len: length of string to return. must be an odd number.
1167 *
1168 * This function is identical to ata_id_string except that it
1169 * trims trailing spaces and terminates the resulting string with
1170 * null. @len must be actual maximum length (even number) + 1.
1171 *
1172 * LOCKING:
1173 * caller.
1174 */
1175 void ata_id_c_string(const u16 *id, unsigned char *s,
1176 unsigned int ofs, unsigned int len)
1177 {
1178 unsigned char *p;
1179
1180 WARN_ON(!(len & 1));
1181
1182 ata_id_string(id, s, ofs, len - 1);
1183
1184 p = s + strnlen(s, len - 1);
1185 while (p > s && p[-1] == ' ')
1186 p--;
1187 *p = '\0';
1188 }
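/*
 * Typical use of the helper above (sketch): pulling the model string out
 * of IDENTIFY data for log messages, e.g.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * where ATA_ID_PROD/ATA_ID_PROD_LEN give the word offset and byte length
 * of the model field in the IDENTIFY page.
 */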
1189
1190 static u64 ata_id_n_sectors(const u16 *id)
1191 {
1192 if (ata_id_has_lba(id)) {
1193 if (ata_id_has_lba48(id))
1194 return ata_id_u64(id, 100);
1195 else
1196 return ata_id_u32(id, 60);
1197 } else {
1198 if (ata_id_current_chs_valid(id))
1199 return ata_id_u32(id, 57);
1200 else
1201 return id[1] * id[3] * id[6];
1202 }
1203 }
1204
1205 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1206 {
1207 u64 sectors = 0;
1208
1209 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1210 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1211 sectors |= (tf->hob_lbal & 0xff) << 24;
1212 sectors |= (tf->lbah & 0xff) << 16;
1213 sectors |= (tf->lbam & 0xff) << 8;
1214 sectors |= (tf->lbal & 0xff);
1215
1216 return sectors;
1217 }
1218
1219 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1220 {
1221 u64 sectors = 0;
1222
1223 sectors |= (tf->device & 0x0f) << 24;
1224 sectors |= (tf->lbah & 0xff) << 16;
1225 sectors |= (tf->lbam & 0xff) << 8;
1226 sectors |= (tf->lbal & 0xff);
1227
1228 return sectors;
1229 }
1230
1231 /**
1232 * ata_read_native_max_address - Read native max address
1233 * @dev: target device
1234 * @max_sectors: out parameter for the result native max address
1235 *
1236 * Perform an LBA48 or LBA28 native size query upon the device in
1237 * question.
1238 *
1239 * RETURNS:
1240 * 0 on success, -EACCES if command is aborted by the drive.
1241 * -EIO on other errors.
1242 */
1243 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1244 {
1245 unsigned int err_mask;
1246 struct ata_taskfile tf;
1247 int lba48 = ata_id_has_lba48(dev->id);
1248
1249 ata_tf_init(dev, &tf);
1250
1251 /* always clear all address registers */
1252 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1253
1254 if (lba48) {
1255 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1256 tf.flags |= ATA_TFLAG_LBA48;
1257 } else
1258 tf.command = ATA_CMD_READ_NATIVE_MAX;
1259
1260 tf.protocol |= ATA_PROT_NODATA;
1261 tf.device |= ATA_LBA;
1262
1263 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1264 if (err_mask) {
1265 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1266 "max address (err_mask=0x%x)\n", err_mask);
1267 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1268 return -EACCES;
1269 return -EIO;
1270 }
1271
1272 if (lba48)
1273 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1274 else
1275 *max_sectors = ata_tf_to_lba(&tf) + 1;
1276 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1277 (*max_sectors)--;
1278 return 0;
1279 }
1280
1281 /**
1282 * ata_set_max_sectors - Set max sectors
1283 * @dev: target device
1284 * @new_sectors: new max sectors value to set for the device
1285 *
1286 * Set max sectors of @dev to @new_sectors.
1287 *
1288 * RETURNS:
1289 * 0 on success, -EACCES if command is aborted or denied (due to
1290 * previous non-volatile SET_MAX) by the drive. -EIO on other
1291 * errors.
1292 */
1293 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1294 {
1295 unsigned int err_mask;
1296 struct ata_taskfile tf;
1297 int lba48 = ata_id_has_lba48(dev->id);
1298
1299 new_sectors--;
1300
1301 ata_tf_init(dev, &tf);
1302
1303 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1304
1305 if (lba48) {
1306 tf.command = ATA_CMD_SET_MAX_EXT;
1307 tf.flags |= ATA_TFLAG_LBA48;
1308
1309 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1310 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1311 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1312 } else {
1313 tf.command = ATA_CMD_SET_MAX;
1314
1315 tf.device |= (new_sectors >> 24) & 0xf;
1316 }
1317
1318 tf.protocol |= ATA_PROT_NODATA;
1319 tf.device |= ATA_LBA;
1320
1321 tf.lbal = (new_sectors >> 0) & 0xff;
1322 tf.lbam = (new_sectors >> 8) & 0xff;
1323 tf.lbah = (new_sectors >> 16) & 0xff;
1324
1325 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1326 if (err_mask) {
1327 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1328 "max address (err_mask=0x%x)\n", err_mask);
1329 if (err_mask == AC_ERR_DEV &&
1330 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1331 return -EACCES;
1332 return -EIO;
1333 }
1334
1335 return 0;
1336 }
1337
1338 /**
1339 * ata_hpa_resize - Resize a device with an HPA set
1340 * @dev: Device to resize
1341 *
1342 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1343 * it if required to the full size of the media. The caller must check
1344 * the drive has the HPA feature set enabled.
1345 *
1346 * RETURNS:
1347 * 0 on success, -errno on failure.
1348 */
1349 static int ata_hpa_resize(struct ata_device *dev)
1350 {
1351 struct ata_eh_context *ehc = &dev->link->eh_context;
1352 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1353 u64 sectors = ata_id_n_sectors(dev->id);
1354 u64 native_sectors;
1355 int rc;
1356
1357 /* do we need to do it? */
1358 if (dev->class != ATA_DEV_ATA ||
1359 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1360 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1361 return 0;
1362
1363 /* read native max address */
1364 rc = ata_read_native_max_address(dev, &native_sectors);
1365 if (rc) {
1366 /* If device aborted the command or HPA isn't going to
1367 * be unlocked, skip HPA resizing.
1368 */
1369 if (rc == -EACCES || !ata_ignore_hpa) {
1370 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1371 "broken, skipping HPA handling\n");
1372 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1373
1374 /* we can continue if device aborted the command */
1375 if (rc == -EACCES)
1376 rc = 0;
1377 }
1378
1379 return rc;
1380 }
1381
1382 /* nothing to do? */
1383 if (native_sectors <= sectors || !ata_ignore_hpa) {
1384 if (!print_info || native_sectors == sectors)
1385 return 0;
1386
1387 if (native_sectors > sectors)
1388 ata_dev_printk(dev, KERN_INFO,
1389 "HPA detected: current %llu, native %llu\n",
1390 (unsigned long long)sectors,
1391 (unsigned long long)native_sectors);
1392 else if (native_sectors < sectors)
1393 ata_dev_printk(dev, KERN_WARNING,
1394 "native sectors (%llu) is smaller than "
1395 "sectors (%llu)\n",
1396 (unsigned long long)native_sectors,
1397 (unsigned long long)sectors);
1398 return 0;
1399 }
1400
1401 /* let's unlock HPA */
1402 rc = ata_set_max_sectors(dev, native_sectors);
1403 if (rc == -EACCES) {
1404 /* if device aborted the command, skip HPA resizing */
1405 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1406 "(%llu -> %llu), skipping HPA handling\n",
1407 (unsigned long long)sectors,
1408 (unsigned long long)native_sectors);
1409 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1410 return 0;
1411 } else if (rc)
1412 return rc;
1413
1414 /* re-read IDENTIFY data */
1415 rc = ata_dev_reread_id(dev, 0);
1416 if (rc) {
1417 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1418 "data after HPA resizing\n");
1419 return rc;
1420 }
1421
1422 if (print_info) {
1423 u64 new_sectors = ata_id_n_sectors(dev->id);
1424 ata_dev_printk(dev, KERN_INFO,
1425 "HPA unlocked: %llu -> %llu, native %llu\n",
1426 (unsigned long long)sectors,
1427 (unsigned long long)new_sectors,
1428 (unsigned long long)native_sectors);
1429 }
1430
1431 return 0;
1432 }
1433
1434 /**
1435 * ata_noop_dev_select - Select device 0/1 on ATA bus
1436 * @ap: ATA channel to manipulate
1437 * @device: ATA device (numbered from zero) to select
1438 *
1439 * This function performs no actual function.
1440 *
1441 * May be used as the dev_select() entry in ata_port_operations.
1442 *
1443 * LOCKING:
1444 * caller.
1445 */
1446 void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1447 {
1448 }
1449
1450 /**
1451 * ata_dump_id - IDENTIFY DEVICE info debugging output
1452 * @id: IDENTIFY DEVICE page to dump
1453 *
1454 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1455 * page.
1456 *
1457 * LOCKING:
1458 * caller.
1459 */
1460
1461 static inline void ata_dump_id(const u16 *id)
1462 {
1463 DPRINTK("49==0x%04x "
1464 "53==0x%04x "
1465 "63==0x%04x "
1466 "64==0x%04x "
1467 "75==0x%04x \n",
1468 id[49],
1469 id[53],
1470 id[63],
1471 id[64],
1472 id[75]);
1473 DPRINTK("80==0x%04x "
1474 "81==0x%04x "
1475 "82==0x%04x "
1476 "83==0x%04x "
1477 "84==0x%04x \n",
1478 id[80],
1479 id[81],
1480 id[82],
1481 id[83],
1482 id[84]);
1483 DPRINTK("88==0x%04x "
1484 "93==0x%04x\n",
1485 id[88],
1486 id[93]);
1487 }
1488
1489 /**
1490 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1491 * @id: IDENTIFY data to compute xfer mask from
1492 *
1493 * Compute the xfermask for this device. This is not as trivial
1494 * as it seems if we must consider early devices correctly.
1495 *
1496 * FIXME: pre IDE drive timing (do we care ?).
1497 *
1498 * LOCKING:
1499 * None.
1500 *
1501 * RETURNS:
1502 * Computed xfermask
1503 */
1504 unsigned long ata_id_xfermask(const u16 *id)
1505 {
1506 unsigned long pio_mask, mwdma_mask, udma_mask;
1507
1508 /* Usual case. Word 53 indicates word 64 is valid */
1509 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1510 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1511 pio_mask <<= 3;
1512 pio_mask |= 0x7;
1513 } else {
1514 /* If word 64 isn't valid then Word 51 high byte holds
1515 * the PIO timing number for the maximum. Turn it into
1516 * a mask.
1517 */
1518 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1519 if (mode < 5) /* Valid PIO range */
1520 pio_mask = (2 << mode) - 1;
1521 else
1522 pio_mask = 1;
1523
1524 /* But wait.. there's more. Design your standards by
1525 * committee and you too can get a free iordy field to
1526 * process. However, it's the speeds, not the modes, that
1527 * are supported... Note drivers using the timing API
1528 * will get this right anyway
1529 */
1530 }
1531
1532 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1533
1534 if (ata_id_is_cfa(id)) {
1535 /*
1536 * Process compact flash extended modes
1537 */
1538 int pio = id[163] & 0x7;
1539 int dma = (id[163] >> 3) & 7;
1540
1541 if (pio)
1542 pio_mask |= (1 << 5);
1543 if (pio > 1)
1544 pio_mask |= (1 << 6);
1545 if (dma)
1546 mwdma_mask |= (1 << 3);
1547 if (dma > 1)
1548 mwdma_mask |= (1 << 4);
1549 }
1550
1551 udma_mask = 0;
1552 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1553 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1554
1555 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1556 }
1557
1558 /**
1559 * ata_pio_queue_task - Queue port_task
1560 * @ap: The ata_port to queue port_task for
1561 * @fn: workqueue function to be scheduled
1562 * @data: data for @fn to use
1563 * @delay: delay time for workqueue function
1564 *
1565 * Schedule @fn(@data) for execution after @delay jiffies using
1566 * port_task. There is one port_task per port and it's the
1567 * user's (low level driver's) responsibility to make sure that only
1568 * one task is active at any given time.
1569 *
1570 * libata core layer takes care of synchronization between
1571 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1572 * synchronization.
1573 *
1574 * LOCKING:
1575 * Inherited from caller.
1576 */
1577 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1578 {
1579 ap->port_task_data = data;
1580
1581 /* may fail if ata_port_flush_task() in progress */
1582 queue_delayed_work(ata_wq, &ap->port_task, delay);
1583 }
1584
1585 /**
1586 * ata_port_flush_task - Flush port_task
1587 * @ap: The ata_port to flush port_task for
1588 *
1589 * After this function completes, port_task is guaranteed not to
1590 * be running or scheduled.
1591 *
1592 * LOCKING:
1593 * Kernel thread context (may sleep)
1594 */
1595 void ata_port_flush_task(struct ata_port *ap)
1596 {
1597 DPRINTK("ENTER\n");
1598
1599 cancel_rearming_delayed_work(&ap->port_task);
1600
1601 if (ata_msg_ctl(ap))
1602 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1603 }
1604
1605 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1606 {
1607 struct completion *waiting = qc->private_data;
1608
1609 complete(waiting);
1610 }
1611
1612 /**
1613 * ata_exec_internal_sg - execute libata internal command
1614 * @dev: Device to which the command is sent
1615 * @tf: Taskfile registers for the command and the result
1616 * @cdb: CDB for packet command
1617 * @dma_dir: Data transfer direction of the command
1618 * @sgl: sg list for the data buffer of the command
1619 * @n_elem: Number of sg entries
1620 * @timeout: Timeout in msecs (0 for default)
1621 *
1622 * Executes libata internal command with timeout. @tf contains
1623 * command on entry and result on return. Timeout and error
1624 * conditions are reported via return value. No recovery action
1625 * is taken after a command times out. It's the caller's duty to
1626 * clean up after timeout.
1627 *
1628 * LOCKING:
1629 * None. Should be called with kernel context, might sleep.
1630 *
1631 * RETURNS:
1632 * Zero on success, AC_ERR_* mask on failure
1633 */
1634 unsigned ata_exec_internal_sg(struct ata_device *dev,
1635 struct ata_taskfile *tf, const u8 *cdb,
1636 int dma_dir, struct scatterlist *sgl,
1637 unsigned int n_elem, unsigned long timeout)
1638 {
1639 struct ata_link *link = dev->link;
1640 struct ata_port *ap = link->ap;
1641 u8 command = tf->command;
1642 struct ata_queued_cmd *qc;
1643 unsigned int tag, preempted_tag;
1644 u32 preempted_sactive, preempted_qc_active;
1645 int preempted_nr_active_links;
1646 DECLARE_COMPLETION_ONSTACK(wait);
1647 unsigned long flags;
1648 unsigned int err_mask;
1649 int rc;
1650
1651 spin_lock_irqsave(ap->lock, flags);
1652
1653 /* no internal command while frozen */
1654 if (ap->pflags & ATA_PFLAG_FROZEN) {
1655 spin_unlock_irqrestore(ap->lock, flags);
1656 return AC_ERR_SYSTEM;
1657 }
1658
1659 /* initialize internal qc */
1660
1661 /* XXX: Tag 0 is used for drivers with legacy EH as some
1662 * drivers choke if any other tag is given. This breaks
1663 * ata_tag_internal() test for those drivers. Don't use new
1664 * EH stuff without converting to it.
1665 */
1666 if (ap->ops->error_handler)
1667 tag = ATA_TAG_INTERNAL;
1668 else
1669 tag = 0;
1670
1671 if (test_and_set_bit(tag, &ap->qc_allocated))
1672 BUG();
1673 qc = __ata_qc_from_tag(ap, tag);
1674
1675 qc->tag = tag;
1676 qc->scsicmd = NULL;
1677 qc->ap = ap;
1678 qc->dev = dev;
1679 ata_qc_reinit(qc);
1680
1681 preempted_tag = link->active_tag;
1682 preempted_sactive = link->sactive;
1683 preempted_qc_active = ap->qc_active;
1684 preempted_nr_active_links = ap->nr_active_links;
1685 link->active_tag = ATA_TAG_POISON;
1686 link->sactive = 0;
1687 ap->qc_active = 0;
1688 ap->nr_active_links = 0;
1689
1690 /* prepare & issue qc */
1691 qc->tf = *tf;
1692 if (cdb)
1693 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1694 qc->flags |= ATA_QCFLAG_RESULT_TF;
1695 qc->dma_dir = dma_dir;
1696 if (dma_dir != DMA_NONE) {
1697 unsigned int i, buflen = 0;
1698 struct scatterlist *sg;
1699
1700 for_each_sg(sgl, sg, n_elem, i)
1701 buflen += sg->length;
1702
1703 ata_sg_init(qc, sgl, n_elem);
1704 qc->nbytes = buflen;
1705 }
1706
1707 qc->private_data = &wait;
1708 qc->complete_fn = ata_qc_complete_internal;
1709
1710 ata_qc_issue(qc);
1711
1712 spin_unlock_irqrestore(ap->lock, flags);
1713
1714 if (!timeout)
1715 timeout = ata_probe_timeout * 1000 / HZ;
1716
1717 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1718
1719 ata_port_flush_task(ap);
1720
1721 if (!rc) {
1722 spin_lock_irqsave(ap->lock, flags);
1723
1724 /* We're racing with irq here. If we lose, the
1725 * following test prevents us from completing the qc
1726 * twice. If we win, the port is frozen and will be
1727 * cleaned up by ->post_internal_cmd().
1728 */
1729 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1730 qc->err_mask |= AC_ERR_TIMEOUT;
1731
1732 if (ap->ops->error_handler)
1733 ata_port_freeze(ap);
1734 else
1735 ata_qc_complete(qc);
1736
1737 if (ata_msg_warn(ap))
1738 ata_dev_printk(dev, KERN_WARNING,
1739 "qc timeout (cmd 0x%x)\n", command);
1740 }
1741
1742 spin_unlock_irqrestore(ap->lock, flags);
1743 }
1744
1745 /* do post_internal_cmd */
1746 if (ap->ops->post_internal_cmd)
1747 ap->ops->post_internal_cmd(qc);
1748
1749 /* perform minimal error analysis */
1750 if (qc->flags & ATA_QCFLAG_FAILED) {
1751 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1752 qc->err_mask |= AC_ERR_DEV;
1753
1754 if (!qc->err_mask)
1755 qc->err_mask |= AC_ERR_OTHER;
1756
1757 if (qc->err_mask & ~AC_ERR_OTHER)
1758 qc->err_mask &= ~AC_ERR_OTHER;
1759 }
1760
1761 /* finish up */
1762 spin_lock_irqsave(ap->lock, flags);
1763
1764 *tf = qc->result_tf;
1765 err_mask = qc->err_mask;
1766
1767 ata_qc_free(qc);
1768 link->active_tag = preempted_tag;
1769 link->sactive = preempted_sactive;
1770 ap->qc_active = preempted_qc_active;
1771 ap->nr_active_links = preempted_nr_active_links;
1772
1773 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1774 * Until those drivers are fixed, we detect the condition
1775 * here, fail the command with AC_ERR_SYSTEM and reenable the
1776 * port.
1777 *
1778 * Note that this doesn't change any behavior as internal
1779 * command failure results in disabling the device in the
1780 * higher layer for LLDDs without new reset/EH callbacks.
1781 *
1782 * Kill the following code as soon as those drivers are fixed.
1783 */
1784 if (ap->flags & ATA_FLAG_DISABLED) {
1785 err_mask |= AC_ERR_SYSTEM;
1786 ata_port_probe(ap);
1787 }
1788
1789 spin_unlock_irqrestore(ap->lock, flags);
1790
1791 return err_mask;
1792 }
1793
1794 /**
1795 * ata_exec_internal - execute libata internal command
1796 * @dev: Device to which the command is sent
1797 * @tf: Taskfile registers for the command and the result
1798 * @cdb: CDB for packet command
1799 * @dma_dir: Data transfer direction of the command
1800 * @buf: Data buffer of the command
1801 * @buflen: Length of data buffer
1802 * @timeout: Timeout in msecs (0 for default)
1803 *
1804 * Wrapper around ata_exec_internal_sg() which takes simple
1805 * buffer instead of sg list.
1806 *
1807 * LOCKING:
1808 * None. Should be called with kernel context, might sleep.
1809 *
1810 * RETURNS:
1811 * Zero on success, AC_ERR_* mask on failure
1812 */
1813 unsigned ata_exec_internal(struct ata_device *dev,
1814 struct ata_taskfile *tf, const u8 *cdb,
1815 int dma_dir, void *buf, unsigned int buflen,
1816 unsigned long timeout)
1817 {
1818 struct scatterlist *psg = NULL, sg;
1819 unsigned int n_elem = 0;
1820
1821 if (dma_dir != DMA_NONE) {
1822 WARN_ON(!buf);
1823 sg_init_one(&sg, buf, buflen);
1824 psg = &sg;
1825 n_elem++;
1826 }
1827
1828 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1829 timeout);
1830 }
1831
1832 /**
1833 * ata_do_simple_cmd - execute simple internal command
1834 * @dev: Device to which the command is sent
1835 * @cmd: Opcode to execute
1836 *
1837 * Execute a 'simple' command that consists only of the opcode
1838 * 'cmd' itself, without filling any other registers
1839 *
1840 * LOCKING:
1841 * Kernel thread context (may sleep).
1842 *
1843 * RETURNS:
1844 * Zero on success, AC_ERR_* mask on failure
1845 */
1846 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1847 {
1848 struct ata_taskfile tf;
1849
1850 ata_tf_init(dev, &tf);
1851
1852 tf.command = cmd;
1853 tf.flags |= ATA_TFLAG_DEVICE;
1854 tf.protocol = ATA_PROT_NODATA;
1855
1856 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1857 }
1858
1859 /**
1860 * ata_pio_need_iordy - check if iordy needed
1861 * @adev: ATA device
1862 *
1863 * Check if the current speed of the device requires IORDY. Used
1864 * by various controllers for chip configuration.
1865 */
1866
1867 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1868 {
1869 /* Controller doesn't support IORDY. Probably a pointless check
1870 as the caller should know this */
1871 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1872 return 0;
1873 /* For PIO3 and higher, IORDY is mandatory */
1874 if (adev->pio_mode > XFER_PIO_2)
1875 return 1;
1876 /* We turn it on when possible */
1877 if (ata_id_has_iordy(adev->id))
1878 return 1;
1879 return 0;
1880 }
1881
1882 /**
1883 * ata_pio_mask_no_iordy - Return the non IORDY mask
1884 * @adev: ATA device
1885 *
1886 * Compute the highest mode possible if we are not using iordy. Return
1887 * -1 if no iordy mode is available.
1888 */
1889
1890 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1891 {
1892 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1893 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1894 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1895 /* Is the speed faster than the drive allows non IORDY ? */
1896 if (pio) {
1897 /* This is cycle times not frequency - watch the logic! */
1898 if (pio > 240) /* PIO2 is 240nS per cycle */
1899 return 3 << ATA_SHIFT_PIO;
1900 return 7 << ATA_SHIFT_PIO;
1901 }
1902 }
1903 return 3 << ATA_SHIFT_PIO;
1904 }
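/*
 * Note on the masks above: 3 << ATA_SHIFT_PIO allows PIO0-1 and
 * 7 << ATA_SHIFT_PIO allows PIO0-2, i.e. the drive may run up to PIO2
 * without IORDY only when its advertised non-IORDY cycle time is fast
 * enough (<= 240ns).
 */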
1905
1906 /**
1907 * ata_dev_read_id - Read ID data from the specified device
1908 * @dev: target device
1909 * @p_class: pointer to class of the target device (may be changed)
1910 * @flags: ATA_READID_* flags
1911 * @id: buffer to read IDENTIFY data into
1912 *
1913 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1914 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1915 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1916 * for pre-ATA4 drives.
1917 *
1918 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1919 * now we abort if we hit that case.
1920 *
1921 * LOCKING:
1922 * Kernel thread context (may sleep)
1923 *
1924 * RETURNS:
1925 * 0 on success, -errno otherwise.
1926 */
1927 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1928 unsigned int flags, u16 *id)
1929 {
1930 struct ata_port *ap = dev->link->ap;
1931 unsigned int class = *p_class;
1932 struct ata_taskfile tf;
1933 unsigned int err_mask = 0;
1934 const char *reason;
1935 int may_fallback = 1, tried_spinup = 0;
1936 int rc;
1937
1938 if (ata_msg_ctl(ap))
1939 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1940
1941 retry:
1942 ata_tf_init(dev, &tf);
1943
1944 switch (class) {
1945 case ATA_DEV_ATA:
1946 tf.command = ATA_CMD_ID_ATA;
1947 break;
1948 case ATA_DEV_ATAPI:
1949 tf.command = ATA_CMD_ID_ATAPI;
1950 break;
1951 default:
1952 rc = -ENODEV;
1953 reason = "unsupported class";
1954 goto err_out;
1955 }
1956
1957 tf.protocol = ATA_PROT_PIO;
1958
1959 /* Some devices choke if TF registers contain garbage. Make
1960 * sure those are properly initialized.
1961 */
1962 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1963
1964 /* Device presence detection is unreliable on some
1965 * controllers. Always poll IDENTIFY if available.
1966 */
1967 tf.flags |= ATA_TFLAG_POLLING;
1968
1969 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1970 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1971 if (err_mask) {
1972 if (err_mask & AC_ERR_NODEV_HINT) {
1973 ata_dev_printk(dev, KERN_DEBUG,
1974 "NODEV after polling detection\n");
1975 return -ENOENT;
1976 }
1977
1978 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1979 /* Device or controller might have reported
1980 * the wrong device class. Give a shot at the
1981 * other IDENTIFY if the current one is
1982 * aborted by the device.
1983 */
1984 if (may_fallback) {
1985 may_fallback = 0;
1986
1987 if (class == ATA_DEV_ATA)
1988 class = ATA_DEV_ATAPI;
1989 else
1990 class = ATA_DEV_ATA;
1991 goto retry;
1992 }
1993
1994 /* Control reaches here iff the device aborted
1995 * both flavors of IDENTIFYs which happens
1996 * sometimes with phantom devices.
1997 */
1998 ata_dev_printk(dev, KERN_DEBUG,
1999 "both IDENTIFYs aborted, assuming NODEV\n");
2000 return -ENOENT;
2001 }
2002
2003 rc = -EIO;
2004 reason = "I/O error";
2005 goto err_out;
2006 }
2007
2008 /* Falling back doesn't make sense if ID data was read
2009 * successfully at least once.
2010 */
2011 may_fallback = 0;
2012
2013 swap_buf_le16(id, ATA_ID_WORDS);
2014
2015 /* sanity check */
2016 rc = -EINVAL;
2017 reason = "device reports invalid type";
2018
2019 if (class == ATA_DEV_ATA) {
2020 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2021 goto err_out;
2022 } else {
2023 if (ata_id_is_ata(id))
2024 goto err_out;
2025 }
2026
2027 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2028 tried_spinup = 1;
2029 /*
2030 * Drive powered-up in standby mode, and requires a specific
2031 * SET_FEATURES spin-up subcommand before it will accept
2032 * anything other than the original IDENTIFY command.
2033 */
2034 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2035 if (err_mask && id[2] != 0x738c) {
2036 rc = -EIO;
2037 reason = "SPINUP failed";
2038 goto err_out;
2039 }
2040 /*
2041 * If the drive initially returned incomplete IDENTIFY info,
2042 * we now must reissue the IDENTIFY command.
2043 */
2044 if (id[2] == 0x37c8)
2045 goto retry;
2046 }
2047
2048 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2049 /*
2050 * The exact sequence expected by certain pre-ATA4 drives is:
2051 * SRST RESET
2052 * IDENTIFY (optional in early ATA)
2053 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2054 * anything else..
2055 * Some drives were very specific about that exact sequence.
2056 *
2057 * Note that ATA4 says lba is mandatory so the second check
2058 * should never trigger.
2059 */
2060 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2061 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2062 if (err_mask) {
2063 rc = -EIO;
2064 reason = "INIT_DEV_PARAMS failed";
2065 goto err_out;
2066 }
2067
2068 /* current CHS translation info (id[53-58]) might be
2069 * changed. reread the identify device info.
2070 */
2071 flags &= ~ATA_READID_POSTRESET;
2072 goto retry;
2073 }
2074 }
2075
2076 *p_class = class;
2077
2078 return 0;
2079
2080 err_out:
2081 if (ata_msg_warn(ap))
2082 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2083 "(%s, err_mask=0x%x)\n", reason, err_mask);
2084 return rc;
2085 }
2086
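/*
 * ata_dev_knobble - detect a bridged PATA device
 *
 * Returns non-zero when the port reports a SATA cable but the device's
 * IDENTIFY data does not claim SATA, i.e. a PATA drive sitting behind a
 * SATA bridge; such devices later get bridge limits applied in
 * ata_dev_configure().
 */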
2087 static inline u8 ata_dev_knobble(struct ata_device *dev)
2088 {
2089 struct ata_port *ap = dev->link->ap;
2090 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2091 }
2092
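/*
 * Fill @desc with a human-readable NCQ description for @dev and set
 * ATA_DFLAG_NCQ when both the device and the port support NCQ.  The
 * reported queue depth is capped by the SCSI host's can_queue.
 */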
2093 static void ata_dev_config_ncq(struct ata_device *dev,
2094 char *desc, size_t desc_sz)
2095 {
2096 struct ata_port *ap = dev->link->ap;
2097 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2098
2099 if (!ata_id_has_ncq(dev->id)) {
2100 desc[0] = '\0';
2101 return;
2102 }
2103 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2104 snprintf(desc, desc_sz, "NCQ (not used)");
2105 return;
2106 }
2107 if (ap->flags & ATA_FLAG_NCQ) {
2108 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2109 dev->flags |= ATA_DFLAG_NCQ;
2110 }
2111
2112 if (hdepth >= ddepth)
2113 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2114 else
2115 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2116 }
2117
2118 /**
2119 * ata_dev_configure - Configure the specified ATA/ATAPI device
2120 * @dev: Target device to configure
2121 *
2122 * Configure @dev according to @dev->id. Generic and low-level
2123 * driver specific fixups are also applied.
2124 *
2125 * LOCKING:
2126 * Kernel thread context (may sleep)
2127 *
2128 * RETURNS:
2129 * 0 on success, -errno otherwise
2130 */
2131 int ata_dev_configure(struct ata_device *dev)
2132 {
2133 struct ata_port *ap = dev->link->ap;
2134 struct ata_eh_context *ehc = &dev->link->eh_context;
2135 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2136 const u16 *id = dev->id;
2137 unsigned long xfer_mask;
2138 char revbuf[7]; /* XYZ-99\0 */
2139 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2140 char modelbuf[ATA_ID_PROD_LEN+1];
2141 int rc;
2142
2143 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2144 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2145 __func__);
2146 return 0;
2147 }
2148
2149 if (ata_msg_probe(ap))
2150 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2151
2152 /* set horkage */
2153 dev->horkage |= ata_dev_blacklisted(dev);
2154 ata_force_horkage(dev);
2155
2156 /* let ACPI work its magic */
2157 rc = ata_acpi_on_devcfg(dev);
2158 if (rc)
2159 return rc;
2160
2161 /* massage HPA, do it early as it might change IDENTIFY data */
2162 rc = ata_hpa_resize(dev);
2163 if (rc)
2164 return rc;
2165
2166 /* print device capabilities */
2167 if (ata_msg_probe(ap))
2168 ata_dev_printk(dev, KERN_DEBUG,
2169 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2170 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2171 __func__,
2172 id[49], id[82], id[83], id[84],
2173 id[85], id[86], id[87], id[88]);
2174
2175 /* initialize to-be-configured parameters */
2176 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2177 dev->max_sectors = 0;
2178 dev->cdb_len = 0;
2179 dev->n_sectors = 0;
2180 dev->cylinders = 0;
2181 dev->heads = 0;
2182 dev->sectors = 0;
2183
2184 /*
2185 * common ATA, ATAPI feature tests
2186 */
2187
2188 /* find max transfer mode; for printk only */
2189 xfer_mask = ata_id_xfermask(id);
2190
2191 if (ata_msg_probe(ap))
2192 ata_dump_id(id);
2193
2194 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2195 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2196 sizeof(fwrevbuf));
2197
2198 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2199 sizeof(modelbuf));
2200
2201 /* ATA-specific feature tests */
2202 if (dev->class == ATA_DEV_ATA) {
2203 if (ata_id_is_cfa(id)) {
2204 if (id[162] & 1) /* CPRM may make this media unusable */
2205 ata_dev_printk(dev, KERN_WARNING,
2206 "supports DRM functions and may "
2207 "not be fully accessable.\n");
2208 snprintf(revbuf, 7, "CFA");
2209 } else {
2210 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2211 /* Warn the user if the device has TPM extensions */
2212 if (ata_id_has_tpm(id))
2213 ata_dev_printk(dev, KERN_WARNING,
2214 "supports DRM functions and may "
2215 "not be fully accessable.\n");
2216 }
2217
2218 dev->n_sectors = ata_id_n_sectors(id);
2219
2220 if (dev->id[59] & 0x100)
2221 dev->multi_count = dev->id[59] & 0xff;
2222
2223 if (ata_id_has_lba(id)) {
2224 const char *lba_desc;
2225 char ncq_desc[20];
2226
2227 lba_desc = "LBA";
2228 dev->flags |= ATA_DFLAG_LBA;
2229 if (ata_id_has_lba48(id)) {
2230 dev->flags |= ATA_DFLAG_LBA48;
2231 lba_desc = "LBA48";
2232
2233 if (dev->n_sectors >= (1UL << 28) &&
2234 ata_id_has_flush_ext(id))
2235 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2236 }
2237
2238 /* config NCQ */
2239 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2240
2241 /* print device info to dmesg */
2242 if (ata_msg_drv(ap) && print_info) {
2243 ata_dev_printk(dev, KERN_INFO,
2244 "%s: %s, %s, max %s\n",
2245 revbuf, modelbuf, fwrevbuf,
2246 ata_mode_string(xfer_mask));
2247 ata_dev_printk(dev, KERN_INFO,
2248 "%Lu sectors, multi %u: %s %s\n",
2249 (unsigned long long)dev->n_sectors,
2250 dev->multi_count, lba_desc, ncq_desc);
2251 }
2252 } else {
2253 /* CHS */
2254
2255 /* Default translation */
2256 dev->cylinders = id[1];
2257 dev->heads = id[3];
2258 dev->sectors = id[6];
2259
2260 if (ata_id_current_chs_valid(id)) {
2261 /* Current CHS translation is valid. */
2262 dev->cylinders = id[54];
2263 dev->heads = id[55];
2264 dev->sectors = id[56];
2265 }
2266
2267 /* print device info to dmesg */
2268 if (ata_msg_drv(ap) && print_info) {
2269 ata_dev_printk(dev, KERN_INFO,
2270 "%s: %s, %s, max %s\n",
2271 revbuf, modelbuf, fwrevbuf,
2272 ata_mode_string(xfer_mask));
2273 ata_dev_printk(dev, KERN_INFO,
2274 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2275 (unsigned long long)dev->n_sectors,
2276 dev->multi_count, dev->cylinders,
2277 dev->heads, dev->sectors);
2278 }
2279 }
2280
2281 dev->cdb_len = 16;
2282 }
2283
2284 /* ATAPI-specific feature tests */
2285 else if (dev->class == ATA_DEV_ATAPI) {
2286 const char *cdb_intr_string = "";
2287 const char *atapi_an_string = "";
2288 const char *dma_dir_string = "";
2289 u32 sntf;
2290
2291 rc = atapi_cdb_len(id);
2292 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2293 if (ata_msg_warn(ap))
2294 ata_dev_printk(dev, KERN_WARNING,
2295 "unsupported CDB len\n");
2296 rc = -EINVAL;
2297 goto err_out_nosup;
2298 }
2299 dev->cdb_len = (unsigned int) rc;
2300
2301 /* Enable ATAPI AN if both the host and the device support
2302 * it. If PMP is attached, SNTF is required
2303 * to enable ATAPI AN to discern between PHY status
2304 * changed notifications and ATAPI ANs.
2305 */
2306 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2307 (!ap->nr_pmp_links ||
2308 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2309 unsigned int err_mask;
2310
2311 /* issue SET feature command to turn this on */
2312 err_mask = ata_dev_set_feature(dev,
2313 SETFEATURES_SATA_ENABLE, SATA_AN);
2314 if (err_mask)
2315 ata_dev_printk(dev, KERN_ERR,
2316 "failed to enable ATAPI AN "
2317 "(err_mask=0x%x)\n", err_mask);
2318 else {
2319 dev->flags |= ATA_DFLAG_AN;
2320 atapi_an_string = ", ATAPI AN";
2321 }
2322 }
2323
2324 if (ata_id_cdb_intr(dev->id)) {
2325 dev->flags |= ATA_DFLAG_CDB_INTR;
2326 cdb_intr_string = ", CDB intr";
2327 }
2328
2329 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2330 dev->flags |= ATA_DFLAG_DMADIR;
2331 dma_dir_string = ", DMADIR";
2332 }
2333
2334 /* print device info to dmesg */
2335 if (ata_msg_drv(ap) && print_info)
2336 ata_dev_printk(dev, KERN_INFO,
2337 "ATAPI: %s, %s, max %s%s%s%s\n",
2338 modelbuf, fwrevbuf,
2339 ata_mode_string(xfer_mask),
2340 cdb_intr_string, atapi_an_string,
2341 dma_dir_string);
2342 }
2343
2344 /* determine max_sectors */
2345 dev->max_sectors = ATA_MAX_SECTORS;
2346 if (dev->flags & ATA_DFLAG_LBA48)
2347 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2348
2349 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2350 if (ata_id_has_hipm(dev->id))
2351 dev->flags |= ATA_DFLAG_HIPM;
2352 if (ata_id_has_dipm(dev->id))
2353 dev->flags |= ATA_DFLAG_DIPM;
2354 }
2355
2356 /* Limit PATA drives behind a SATA cable bridge to UDMA5 and
2357 200 sectors per transfer */
2358 if (ata_dev_knobble(dev)) {
2359 if (ata_msg_drv(ap) && print_info)
2360 ata_dev_printk(dev, KERN_INFO,
2361 "applying bridge limits\n");
2362 dev->udma_mask &= ATA_UDMA5;
2363 dev->max_sectors = ATA_MAX_SECTORS;
2364 }
2365
2366 if ((dev->class == ATA_DEV_ATAPI) &&
2367 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2368 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2369 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2370 }
2371
2372 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2373 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2374 dev->max_sectors);
2375
2376 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2377 dev->horkage |= ATA_HORKAGE_IPM;
2378
2379 /* reset link pm_policy for this port to no pm */
2380 ap->pm_policy = MAX_PERFORMANCE;
2381 }
2382
2383 if (ap->ops->dev_config)
2384 ap->ops->dev_config(dev);
2385
2386 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2387 /* Let the user know. We don't want to disallow opens for
2388 rescue purposes, or in case the vendor is just a blithering
2389 idiot. Do this after the dev_config call as some controllers
2390 with buggy firmware may want to avoid reporting false device
2391 bugs */
2392
2393 if (print_info) {
2394 ata_dev_printk(dev, KERN_WARNING,
2395 "Drive reports diagnostics failure. This may indicate a drive\n");
2396 ata_dev_printk(dev, KERN_WARNING,
2397 "fault or invalid emulation. Contact drive vendor for information.\n");
2398 }
2399 }
2400
2401 return 0;
2402
2403 err_out_nosup:
2404 if (ata_msg_probe(ap))
2405 ata_dev_printk(dev, KERN_DEBUG,
2406 "%s: EXIT, err\n", __func__);
2407 return rc;
2408 }
2409
2410 /**
2411 * ata_cable_40wire - return 40 wire cable type
2412 * @ap: port
2413 *
2414 * Helper method for drivers which want to hardwire 40 wire cable
2415 * detection.
2416 */
2417
2418 int ata_cable_40wire(struct ata_port *ap)
2419 {
2420 return ATA_CBL_PATA40;
2421 }
2422
2423 /**
2424 * ata_cable_80wire - return 80 wire cable type
2425 * @ap: port
2426 *
2427 * Helper method for drivers which want to hardwire 80 wire cable
2428 * detection.
2429 */
2430
2431 int ata_cable_80wire(struct ata_port *ap)
2432 {
2433 return ATA_CBL_PATA80;
2434 }
2435
2436 /**
2437 * ata_cable_unknown - return unknown PATA cable.
2438 * @ap: port
2439 *
2440 * Helper method for drivers which have no PATA cable detection.
2441 */
2442
2443 int ata_cable_unknown(struct ata_port *ap)
2444 {
2445 return ATA_CBL_PATA_UNK;
2446 }
2447
2448 /**
2449 * ata_cable_ignore - return ignored PATA cable.
2450 * @ap: port
2451 *
2452 * Helper method for drivers which don't use cable type to limit
2453 * transfer mode.
2454 */
2455 int ata_cable_ignore(struct ata_port *ap)
2456 {
2457 return ATA_CBL_PATA_IGN;
2458 }
2459
2460 /**
2461 * ata_cable_sata - return SATA cable type
2462 * @ap: port
2463 *
2464 * Helper method for drivers which have SATA cables
2465 */
2466
2467 int ata_cable_sata(struct ata_port *ap)
2468 {
2469 return ATA_CBL_SATA;
2470 }
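/*
 * The ata_cable_*() helpers above are typically wired into a driver's
 * port operations as the cable_detect hook, for example (hypothetical
 * driver snippet):
 *
 *	.cable_detect	= ata_cable_40wire,
 *
 * ata_bus_probe() below calls ap->ops->cable_detect(ap) once PDIAG- has
 * been released and stores the result in ap->cbl.
 */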
2471
2472 /**
2473 * ata_bus_probe - Reset and probe ATA bus
2474 * @ap: Bus to probe
2475 *
2476 * Master ATA bus probing function. Initiates a hardware-dependent
2477 * bus reset, then attempts to identify any devices found on
2478 * the bus.
2479 *
2480 * LOCKING:
2481 * PCI/etc. bus probe sem.
2482 *
2483 * RETURNS:
2484 * Zero on success, negative errno otherwise.
2485 */
2486
2487 int ata_bus_probe(struct ata_port *ap)
2488 {
2489 unsigned int classes[ATA_MAX_DEVICES];
2490 int tries[ATA_MAX_DEVICES];
2491 int rc;
2492 struct ata_device *dev;
2493
2494 ata_port_probe(ap);
2495
2496 ata_link_for_each_dev(dev, &ap->link)
2497 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2498
2499 retry:
2500 ata_link_for_each_dev(dev, &ap->link) {
2501 /* If we issue an SRST then an ATA drive (not ATAPI)
2502 * may change configuration and be in PIO0 timing. If
2503 * we do a hard reset (or are coming from power on)
2504 * this is true for ATA or ATAPI. Until we've set a
2505 * suitable controller mode we should not touch the
2506 * bus as we may be talking too fast.
2507 */
2508 dev->pio_mode = XFER_PIO_0;
2509
2510 /* If the controller has a pio mode setup function
2511 * then use it to set the chipset to rights. Don't
2512 * touch the DMA setup as that will be dealt with when
2513 * configuring devices.
2514 */
2515 if (ap->ops->set_piomode)
2516 ap->ops->set_piomode(ap, dev);
2517 }
2518
2519 /* reset and determine device classes */
2520 ap->ops->phy_reset(ap);
2521
2522 ata_link_for_each_dev(dev, &ap->link) {
2523 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2524 dev->class != ATA_DEV_UNKNOWN)
2525 classes[dev->devno] = dev->class;
2526 else
2527 classes[dev->devno] = ATA_DEV_NONE;
2528
2529 dev->class = ATA_DEV_UNKNOWN;
2530 }
2531
2532 ata_port_probe(ap);
2533
2534 /* read IDENTIFY page and configure devices. We have to do the identify
2535 specific sequence bass-ackwards so that PDIAG- is released by
2536 the slave device */
2537
2538 ata_link_for_each_dev_reverse(dev, &ap->link) {
2539 if (tries[dev->devno])
2540 dev->class = classes[dev->devno];
2541
2542 if (!ata_dev_enabled(dev))
2543 continue;
2544
2545 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2546 dev->id);
2547 if (rc)
2548 goto fail;
2549 }
2550
2551 /* Now ask for the cable type as PDIAG- should have been released */
2552 if (ap->ops->cable_detect)
2553 ap->cbl = ap->ops->cable_detect(ap);
2554
2555 /* We may have SATA bridge glue hiding here irrespective of the
2556 reported cable types and sensed types */
2557 ata_link_for_each_dev(dev, &ap->link) {
2558 if (!ata_dev_enabled(dev))
2559 continue;
2560 /* SATA drives indicate we have a bridge. We don't know which
2561 end of the link the bridge is on, which is a problem */
2562 if (ata_id_is_sata(dev->id))
2563 ap->cbl = ATA_CBL_SATA;
2564 }
2565
2566 /* After the identify sequence we can now set up the devices. We do
2567 this in the normal order so that the user doesn't get confused */
2568
2569 ata_link_for_each_dev(dev, &ap->link) {
2570 if (!ata_dev_enabled(dev))
2571 continue;
2572
2573 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2574 rc = ata_dev_configure(dev);
2575 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2576 if (rc)
2577 goto fail;
2578 }
2579
2580 /* configure transfer mode */
2581 rc = ata_set_mode(&ap->link, &dev);
2582 if (rc)
2583 goto fail;
2584
2585 ata_link_for_each_dev(dev, &ap->link)
2586 if (ata_dev_enabled(dev))
2587 return 0;
2588
2589 /* no device present, disable port */
2590 ata_port_disable(ap);
2591 return -ENODEV;
2592
2593 fail:
2594 tries[dev->devno]--;
2595
2596 switch (rc) {
2597 case -EINVAL:
2598 /* eeek, something went very wrong, give up */
2599 tries[dev->devno] = 0;
2600 break;
2601
2602 case -ENODEV:
2603 /* give it just one more chance */
2604 tries[dev->devno] = min(tries[dev->devno], 1);
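		/* fall through */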
2605 case -EIO:
2606 if (tries[dev->devno] == 1) {
2607 /* This is the last chance, better to slow
2608 * down than lose it.
2609 */
2610 sata_down_spd_limit(&ap->link);
2611 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2612 }
2613 }
2614
2615 if (!tries[dev->devno])
2616 ata_dev_disable(dev);
2617
2618 goto retry;
2619 }
2620
2621 /**
2622 * ata_port_probe - Mark port as enabled
2623 * @ap: Port for which we indicate enablement
2624 *
2625 * Modify @ap data structure such that the system
2626 * thinks that the entire port is enabled.
2627 *
2628 * LOCKING: host lock, or some other form of
2629 * serialization.
2630 */
2631
2632 void ata_port_probe(struct ata_port *ap)
2633 {
2634 ap->flags &= ~ATA_FLAG_DISABLED;
2635 }
2636
2637 /**
2638 * sata_print_link_status - Print SATA link status
2639 * @link: SATA link to printk link status about
2640 *
2641 * This function prints link speed and status of a SATA link.
2642 *
2643 * LOCKING:
2644 * None.
2645 */
2646 void sata_print_link_status(struct ata_link *link)
2647 {
2648 u32 sstatus, scontrol, tmp;
2649
2650 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2651 return;
2652 sata_scr_read(link, SCR_CONTROL, &scontrol);
2653
2654 if (ata_link_online(link)) {
2655 tmp = (sstatus >> 4) & 0xf;
2656 ata_link_printk(link, KERN_INFO,
2657 "SATA link up %s (SStatus %X SControl %X)\n",
2658 sata_spd_string(tmp), sstatus, scontrol);
2659 } else {
2660 ata_link_printk(link, KERN_INFO,
2661 "SATA link down (SStatus %X SControl %X)\n",
2662 sstatus, scontrol);
2663 }
2664 }
2665
2666 /**
2667 * ata_dev_pair - return other device on cable
2668 * @adev: device
2669 *
2670 * Obtain the other device on the same cable, or if none is
2671 * present NULL is returned
2672 */
2673
2674 struct ata_device *ata_dev_pair(struct ata_device *adev)
2675 {
2676 struct ata_link *link = adev->link;
2677 struct ata_device *pair = &link->device[1 - adev->devno];
2678 if (!ata_dev_enabled(pair))
2679 return NULL;
2680 return pair;
2681 }
2682
2683 /**
2684 * ata_port_disable - Disable port.
2685 * @ap: Port to be disabled.
2686 *
2687 * Modify @ap data structure such that the system
2688 * thinks that the entire port is disabled, and should
2689 * never attempt to probe or communicate with devices
2690 * on this port.
2691 *
2692 * LOCKING: host lock, or some other form of
2693 * serialization.
2694 */
2695
2696 void ata_port_disable(struct ata_port *ap)
2697 {
2698 ap->link.device[0].class = ATA_DEV_NONE;
2699 ap->link.device[1].class = ATA_DEV_NONE;
2700 ap->flags |= ATA_FLAG_DISABLED;
2701 }
2702
2703 /**
2704 * sata_down_spd_limit - adjust SATA spd limit downward
2705 * @link: Link to adjust SATA spd limit for
2706 *
2707 * Adjust SATA spd limit of @link downward. Note that this
2708 * function only adjusts the limit. The change must be applied
2709 * using sata_set_spd().
2710 *
2711 * LOCKING:
2712 * Inherited from caller.
2713 *
2714 * RETURNS:
2715 * 0 on success, negative errno on failure
2716 */
2717 int sata_down_spd_limit(struct ata_link *link)
2718 {
2719 u32 sstatus, spd, mask;
2720 int rc, highbit;
2721
2722 if (!sata_scr_valid(link))
2723 return -EOPNOTSUPP;
2724
2725 /* If SCR can be read, use it to determine the current SPD.
2726 * If not, use cached value in link->sata_spd.
2727 */
2728 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2729 if (rc == 0)
2730 spd = (sstatus >> 4) & 0xf;
2731 else
2732 spd = link->sata_spd;
2733
2734 mask = link->sata_spd_limit;
2735 if (mask <= 1)
2736 return -EINVAL;
2737
2738 /* unconditionally mask off the highest bit */
2739 highbit = fls(mask) - 1;
2740 mask &= ~(1 << highbit);
2741
2742 /* Mask off all speeds higher than or equal to the current
2743 * one. Force 1.5Gbps if current SPD is not available.
2744 */
2745 if (spd > 1)
2746 mask &= (1 << (spd - 1)) - 1;
2747 else
2748 mask &= 1;
2749
2750 /* were we already at the bottom? */
2751 if (!mask)
2752 return -EINVAL;
2753
2754 link->sata_spd_limit = mask;
2755
2756 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2757 sata_spd_string(fls(mask)));
2758
2759 return 0;
2760 }
2761
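/*
 * Compute the target SPD field for SControl from link->sata_spd_limit
 * (never allowing a downstream link to be configured faster than the
 * host link), patch it into *@scontrol and return non-zero if the
 * current setting differs, i.e. if SControl needs to be written back.
 */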
2762 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2763 {
2764 struct ata_link *host_link = &link->ap->link;
2765 u32 limit, target, spd;
2766
2767 limit = link->sata_spd_limit;
2768
2769 /* Don't configure downstream link faster than upstream link.
2770 * It doesn't speed up anything and some PMPs choke on such
2771 * configuration.
2772 */
2773 if (!ata_is_host_link(link) && host_link->sata_spd)
2774 limit &= (1 << host_link->sata_spd) - 1;
2775
2776 if (limit == UINT_MAX)
2777 target = 0;
2778 else
2779 target = fls(limit);
2780
2781 spd = (*scontrol >> 4) & 0xf;
2782 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2783
2784 return spd != target;
2785 }
2786
2787 /**
2788 * sata_set_spd_needed - is SATA spd configuration needed
2789 * @link: Link in question
2790 *
2791 * Test whether the spd limit in SControl matches
2792 * @link->sata_spd_limit. This function is used to determine
2793 * whether hardreset is necessary to apply SATA spd
2794 * configuration.
2795 *
2796 * LOCKING:
2797 * Inherited from caller.
2798 *
2799 * RETURNS:
2800 * 1 if SATA spd configuration is needed, 0 otherwise.
2801 */
2802 int sata_set_spd_needed(struct ata_link *link)
2803 {
2804 u32 scontrol;
2805
2806 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2807 return 1;
2808
2809 return __sata_set_spd_needed(link, &scontrol);
2810 }
2811
2812 /**
2813 * sata_set_spd - set SATA spd according to spd limit
2814 * @link: Link to set SATA spd for
2815 *
2816 * Set SATA spd of @link according to sata_spd_limit.
2817 *
2818 * LOCKING:
2819 * Inherited from caller.
2820 *
2821 * RETURNS:
2822 * 0 if spd doesn't need to be changed, 1 if spd has been
2823 * changed. Negative errno if SCR registers are inaccessible.
2824 */
2825 int sata_set_spd(struct ata_link *link)
2826 {
2827 u32 scontrol;
2828 int rc;
2829
2830 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2831 return rc;
2832
2833 if (!__sata_set_spd_needed(link, &scontrol))
2834 return 0;
2835
2836 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2837 return rc;
2838
2839 return 1;
2840 }
2841
2842 /*
2843 * This mode timing computation functionality is ported over from
2844 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2845 */
2846 /*
2847 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2848 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2849 * for UDMA6, which is currently supported only by Maxtor drives.
2850 *
2851 * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2852 */
2853
2854 static const struct ata_timing ata_timing[] = {
2855 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2856 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2857 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2858 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2859 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2860 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2861 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2862 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2863
2864 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2865 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2866 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2867
2868 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2869 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2870 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2871 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2872 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2873
2874 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2875 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2876 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2877 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2878 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2879 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2880 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2881 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2882
2883 { 0xFF }
2884 };
2885
2886 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2887 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2888
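/*
 * Quantize nanosecond timings into bus clock counts: each field of @t
 * is scaled by 1000 and divided (rounding up via ENOUGH()) by the clock
 * period @T, or @UT for the UDMA field.  EZ() keeps zero fields at zero
 * so that unused values stay unused.
 */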
2889 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2890 {
2891 q->setup = EZ(t->setup * 1000, T);
2892 q->act8b = EZ(t->act8b * 1000, T);
2893 q->rec8b = EZ(t->rec8b * 1000, T);
2894 q->cyc8b = EZ(t->cyc8b * 1000, T);
2895 q->active = EZ(t->active * 1000, T);
2896 q->recover = EZ(t->recover * 1000, T);
2897 q->cycle = EZ(t->cycle * 1000, T);
2898 q->udma = EZ(t->udma * 1000, UT);
2899 }
2900
2901 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2902 struct ata_timing *m, unsigned int what)
2903 {
2904 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2905 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2906 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2907 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2908 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2909 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2910 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2911 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2912 }
2913
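/*
 * Look up the ata_timing entry for @xfer_mode.  Relies on ata_timing[]
 * being sorted by ascending mode number and terminated by the 0xFF
 * sentinel; returns NULL if @xfer_mode has no entry.
 */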
2914 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2915 {
2916 const struct ata_timing *t = ata_timing;
2917
2918 while (xfer_mode > t->mode)
2919 t++;
2920
2921 if (xfer_mode == t->mode)
2922 return t;
2923 return NULL;
2924 }
2925
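/*
 * Compute the bus timings for @speed into @t: start from the standard
 * table entry, stretch cycle times with any EIDE-reported minimums,
 * quantize to bus clocks (@T/@UT), keep DMA timings no faster than the
 * current PIO timing, and finally balance active/recovery against the
 * cycle time.
 */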
2926 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2927 struct ata_timing *t, int T, int UT)
2928 {
2929 const struct ata_timing *s;
2930 struct ata_timing p;
2931
2932 /*
2933 * Find the mode.
2934 */
2935
2936 if (!(s = ata_timing_find_mode(speed)))
2937 return -EINVAL;
2938
2939 memcpy(t, s, sizeof(*s));
2940
2941 /*
2942 * If the drive is an EIDE drive, it can tell us it needs extended
2943 * PIO/MW_DMA cycle timing.
2944 */
2945
2946 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2947 memset(&p, 0, sizeof(p));
2948 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2949 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2950 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2951 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2952 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2953 }
2954 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2955 }
2956
2957 /*
2958 * Convert the timing to bus clock counts.
2959 */
2960
2961 ata_timing_quantize(t, t, T, UT);
2962
2963 /*
2964 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2965 * S.M.A.R.T. and some other commands. We have to ensure that the
2966 * DMA cycle timing is no faster than the fastest PIO timing.
2967 */
2968
2969 if (speed > XFER_PIO_6) {
2970 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2971 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2972 }
2973
2974 /*
2975 * Lengthen active & recovery time so that cycle time is correct.
2976 */
2977
2978 if (t->act8b + t->rec8b < t->cyc8b) {
2979 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2980 t->rec8b = t->cyc8b - t->act8b;
2981 }
2982
2983 if (t->active + t->recover < t->cycle) {
2984 t->active += (t->cycle - (t->active + t->recover)) / 2;
2985 t->recover = t->cycle - t->active;
2986 }
2987
2988 /* In a few cases quantisation may produce enough errors to
2989 leave t->cycle too low for the sum of active and recovery;
2990 if so, we must correct this */
2991 if (t->active + t->recover > t->cycle)
2992 t->cycle = t->active + t->recover;
2993
2994 return 0;
2995 }
2996
2997 /**
2998 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2999 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3000 * @cycle: cycle duration in ns
3001 *
3002 * Return matching xfer mode for @cycle. The returned mode is of
3003 * the transfer type specified by @xfer_shift. If @cycle is too
3004 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3005 * than the fastest known mode, the fastest mode is returned.
3006 *
3007 * LOCKING:
3008 * None.
3009 *
3010 * RETURNS:
3011 * Matching xfer_mode, 0xff if no match found.
3012 */
3013 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3014 {
3015 u8 base_mode = 0xff, last_mode = 0xff;
3016 const struct ata_xfer_ent *ent;
3017 const struct ata_timing *t;
3018
3019 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3020 if (ent->shift == xfer_shift)
3021 base_mode = ent->base;
3022
3023 for (t = ata_timing_find_mode(base_mode);
3024 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3025 unsigned short this_cycle;
3026
3027 switch (xfer_shift) {
3028 case ATA_SHIFT_PIO:
3029 case ATA_SHIFT_MWDMA:
3030 this_cycle = t->cycle;
3031 break;
3032 case ATA_SHIFT_UDMA:
3033 this_cycle = t->udma;
3034 break;
3035 default:
3036 return 0xff;
3037 }
3038
3039 if (cycle > this_cycle)
3040 break;
3041
3042 last_mode = t->mode;
3043 }
3044
3045 return last_mode;
3046 }
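/*
 * Example (hypothetical driver snippet): a controller whose MWDMA
 * engine cannot cycle faster than 150ns could pick the fastest usable
 * mode with:
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_MWDMA, 150);
 *
 * which yields XFER_MW_DMA_1 from the table above; 0xff would mean even
 * the slowest MWDMA mode is too fast for that cycle time.
 */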
3047
3048 /**
3049 * ata_down_xfermask_limit - adjust dev xfer masks downward
3050 * @dev: Device to adjust xfer masks
3051 * @sel: ATA_DNXFER_* selector
3052 *
3053 * Adjust xfer masks of @dev downward. Note that this function
3054 * does not apply the change. Invoking ata_set_mode() afterwards
3055 * will apply the limit.
3056 *
3057 * LOCKING:
3058 * Inherited from caller.
3059 *
3060 * RETURNS:
3061 * 0 on success, negative errno on failure
3062 */
3063 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3064 {
3065 char buf[32];
3066 unsigned long orig_mask, xfer_mask;
3067 unsigned long pio_mask, mwdma_mask, udma_mask;
3068 int quiet, highbit;
3069
3070 quiet = !!(sel & ATA_DNXFER_QUIET);
3071 sel &= ~ATA_DNXFER_QUIET;
3072
3073 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3074 dev->mwdma_mask,
3075 dev->udma_mask);
3076 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3077
3078 switch (sel) {
3079 case ATA_DNXFER_PIO:
3080 highbit = fls(pio_mask) - 1;
3081 pio_mask &= ~(1 << highbit);
3082 break;
3083
3084 case ATA_DNXFER_DMA:
3085 if (udma_mask) {
3086 highbit = fls(udma_mask) - 1;
3087 udma_mask &= ~(1 << highbit);
3088 if (!udma_mask)
3089 return -ENOENT;
3090 } else if (mwdma_mask) {
3091 highbit = fls(mwdma_mask) - 1;
3092 mwdma_mask &= ~(1 << highbit);
3093 if (!mwdma_mask)
3094 return -ENOENT;
3095 }
3096 break;
3097
3098 case ATA_DNXFER_40C:
3099 udma_mask &= ATA_UDMA_MASK_40C;
3100 break;
3101
3102 case ATA_DNXFER_FORCE_PIO0:
3103 pio_mask &= 1;
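		/* fall through */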
3104 case ATA_DNXFER_FORCE_PIO:
3105 mwdma_mask = 0;
3106 udma_mask = 0;
3107 break;
3108
3109 default:
3110 BUG();
3111 }
3112
3113 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3114
3115 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3116 return -ENOENT;
3117
3118 if (!quiet) {
3119 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3120 snprintf(buf, sizeof(buf), "%s:%s",
3121 ata_mode_string(xfer_mask),
3122 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3123 else
3124 snprintf(buf, sizeof(buf), "%s",
3125 ata_mode_string(xfer_mask));
3126
3127 ata_dev_printk(dev, KERN_WARNING,
3128 "limiting speed to %s\n", buf);
3129 }
3130
3131 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3132 &dev->udma_mask);
3133
3134 return 0;
3135 }
3136
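/*
 * Program the already-selected dev->xfer_mode into the device with
 * SET FEATURES - XFER MODE, revalidate it afterwards, and decide which
 * device-reported errors are safe to ignore (old CFA, PIO0-2-only
 * devices without IORDY, early MWDMA0-only devices, or devices that are
 * already configured for the requested mode).
 */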
3137 static int ata_dev_set_mode(struct ata_device *dev)
3138 {
3139 struct ata_eh_context *ehc = &dev->link->eh_context;
3140 const char *dev_err_whine = "";
3141 int ign_dev_err = 0;
3142 unsigned int err_mask;
3143 int rc;
3144
3145 dev->flags &= ~ATA_DFLAG_PIO;
3146 if (dev->xfer_shift == ATA_SHIFT_PIO)
3147 dev->flags |= ATA_DFLAG_PIO;
3148
3149 err_mask = ata_dev_set_xfermode(dev);
3150
3151 if (err_mask & ~AC_ERR_DEV)
3152 goto fail;
3153
3154 /* revalidate */
3155 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3156 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3157 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3158 if (rc)
3159 return rc;
3160
3161 /* Old CFA may refuse this command, which is just fine */
3162 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3163 ign_dev_err = 1;
3164
3165 /* Some very old devices and some bad newer ones fail any kind of
3166 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3167 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3168 dev->pio_mode <= XFER_PIO_2)
3169 ign_dev_err = 1;
3170
3171 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3172 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3173 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3174 dev->dma_mode == XFER_MW_DMA_0 &&
3175 (dev->id[63] >> 8) & 1)
3176 ign_dev_err = 1;
3177
3178 /* if the device is actually configured correctly, ignore dev err */
3179 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3180 ign_dev_err = 1;
3181
3182 if (err_mask & AC_ERR_DEV) {
3183 if (!ign_dev_err)
3184 goto fail;
3185 else
3186 dev_err_whine = " (device error ignored)";
3187 }
3188
3189 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3190 dev->xfer_shift, (int)dev->xfer_mode);
3191
3192 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3193 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3194 dev_err_whine);
3195
3196 return 0;
3197
3198 fail:
3199 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3200 "(err_mask=0x%x)\n", err_mask);
3201 return -EIO;
3202 }
3203
3204 /**
3205 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3206 * @link: link on which timings will be programmed
3207 * @r_failed_dev: out parameter for failed device
3208 *
3209 * Standard implementation of the function used to tune and set
3210 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3211 * ata_dev_set_mode() fails, pointer to the failing device is
3212 * returned in @r_failed_dev.
3213 *
3214 * LOCKING:
3215 * PCI/etc. bus probe sem.
3216 *
3217 * RETURNS:
3218 * 0 on success, negative errno otherwise
3219 */
3220
3221 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3222 {
3223 struct ata_port *ap = link->ap;
3224 struct ata_device *dev;
3225 int rc = 0, used_dma = 0, found = 0;
3226
3227 /* step 1: calculate xfer_mask */
3228 ata_link_for_each_dev(dev, link) {
3229 unsigned long pio_mask, dma_mask;
3230 unsigned int mode_mask;
3231
3232 if (!ata_dev_enabled(dev))
3233 continue;
3234
3235 mode_mask = ATA_DMA_MASK_ATA;
3236 if (dev->class == ATA_DEV_ATAPI)
3237 mode_mask = ATA_DMA_MASK_ATAPI;
3238 else if (ata_id_is_cfa(dev->id))
3239 mode_mask = ATA_DMA_MASK_CFA;
3240
3241 ata_dev_xfermask(dev);
3242 ata_force_xfermask(dev);
3243
3244 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3245 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3246
3247 if (libata_dma_mask & mode_mask)
3248 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3249 else
3250 dma_mask = 0;
3251
3252 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3253 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3254
3255 found = 1;
3256 if (dev->dma_mode != 0xff)
3257 used_dma = 1;
3258 }
3259 if (!found)
3260 goto out;
3261
3262 /* step 2: always set host PIO timings */
3263 ata_link_for_each_dev(dev, link) {
3264 if (!ata_dev_enabled(dev))
3265 continue;
3266
3267 if (dev->pio_mode == 0xff) {
3268 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3269 rc = -EINVAL;
3270 goto out;
3271 }
3272
3273 dev->xfer_mode = dev->pio_mode;
3274 dev->xfer_shift = ATA_SHIFT_PIO;
3275 if (ap->ops->set_piomode)
3276 ap->ops->set_piomode(ap, dev);
3277 }
3278
3279 /* step 3: set host DMA timings */
3280 ata_link_for_each_dev(dev, link) {
3281 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3282 continue;
3283
3284 dev->xfer_mode = dev->dma_mode;
3285 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3286 if (ap->ops->set_dmamode)
3287 ap->ops->set_dmamode(ap, dev);
3288 }
3289
3290 /* step 4: update devices' xfer mode */
3291 ata_link_for_each_dev(dev, link) {
3292 /* don't update suspended devices' xfer mode */
3293 if (!ata_dev_enabled(dev))
3294 continue;
3295
3296 rc = ata_dev_set_mode(dev);
3297 if (rc)
3298 goto out;
3299 }
3300
3301 /* Record simplex status. If we selected DMA then the other
3302 * host channels are not permitted to do so.
3303 */
3304 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3305 ap->host->simplex_claimed = ap;
3306
3307 out:
3308 if (rc)
3309 *r_failed_dev = dev;
3310 return rc;
3311 }
3312
3313 /**
3314 * ata_wait_ready - wait for link to become ready
3315 * @link: link to be waited on
3316 * @deadline: deadline jiffies for the operation
3317 * @check_ready: callback to check link readiness
3318 *
3319 * Wait for @link to become ready. @check_ready should return
3320 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3321 * link doesn't seem to be occupied, other errno for other error
3322 * conditions.
3323 *
3324 * Transient -ENODEV conditions are allowed for
3325 * ATA_TMOUT_FF_WAIT.
3326 *
3327 * LOCKING:
3328 * EH context.
3329 *
3330 * RETURNS:
3331 * 0 if @link is ready before @deadline; otherwise, -errno.
3332 */
3333 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3334 int (*check_ready)(struct ata_link *link))
3335 {
3336 unsigned long start = jiffies;
3337 unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
3338 int warned = 0;
3339
3340 if (time_after(nodev_deadline, deadline))
3341 nodev_deadline = deadline;
3342
3343 while (1) {
3344 unsigned long now = jiffies;
3345 int ready, tmp;
3346
3347 ready = tmp = check_ready(link);
3348 if (ready > 0)
3349 return 0;
3350
3351 /* -ENODEV could be transient. Ignore -ENODEV if link
3352 * is online. Also, some SATA devices take a long
3353 * time to clear 0xff after reset. For example,
3354 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3355 * GoVault needs even more than that. Wait for
3356 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3357 *
3358 * Note that some PATA controllers (pata_ali) explode
3359 * if status register is read more than once when
3360 * there's no device attached.
3361 */
3362 if (ready == -ENODEV) {
3363 if (ata_link_online(link))
3364 ready = 0;
3365 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3366 !ata_link_offline(link) &&
3367 time_before(now, nodev_deadline))
3368 ready = 0;
3369 }
3370
3371 if (ready)
3372 return ready;
3373 if (time_after(now, deadline))
3374 return -EBUSY;
3375
3376 if (!warned && time_after(now, start + 5 * HZ) &&
3377 (deadline - now > 3 * HZ)) {
3378 ata_link_printk(link, KERN_WARNING,
3379 "link is slow to respond, please be patient "
3380 "(ready=%d)\n", tmp);
3381 warned = 1;
3382 }
3383
3384 msleep(50);
3385 }
3386 }
3387
3388 /**
3389 * ata_wait_after_reset - wait for link to become ready after reset
3390 * @link: link to be waited on
3391 * @deadline: deadline jiffies for the operation
3392 * @check_ready: callback to check link readiness
3393 *
3394 * Wait for @link to become ready after reset.
3395 *
3396 * LOCKING:
3397 * EH context.
3398 *
3399 * RETURNS:
3400 * 0 if @link is ready before @deadline; otherwise, -errno.
3401 */
3402 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3403 int (*check_ready)(struct ata_link *link))
3404 {
3405 msleep(ATA_WAIT_AFTER_RESET_MSECS);
3406
3407 return ata_wait_ready(link, deadline, check_ready);
3408 }
3409
3410 /**
3411 * sata_link_debounce - debounce SATA phy status
3412 * @link: ATA link to debounce SATA phy status for
3413 * @params: timing parameters { interval, duration, timeout } in msec
3414 * @deadline: deadline jiffies for the operation
3415 *
3416 * Make sure SStatus of @link reaches stable state, determined by
3417 * holding the same value where DET is not 1 for @duration polled
3418 * every @interval, before @timeout. Timeout constrains the
3419 * beginning of the stable state. Because DET gets stuck at 1 on
3420 * some controllers after hot unplugging, this function waits
3421 * until timeout and then returns 0 if DET is stable at 1.
3422 *
3423 * @timeout is further limited by @deadline. The sooner of the
3424 * two is used.
3425 *
3426 * LOCKING:
3427 * Kernel thread context (may sleep)
3428 *
3429 * RETURNS:
3430 * 0 on success, -errno on failure.
3431 */
3432 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3433 unsigned long deadline)
3434 {
3435 unsigned long interval_msec = params[0];
3436 unsigned long duration = msecs_to_jiffies(params[1]);
3437 unsigned long last_jiffies, t;
3438 u32 last, cur;
3439 int rc;
3440
3441 t = jiffies + msecs_to_jiffies(params[2]);
3442 if (time_before(t, deadline))
3443 deadline = t;
3444
3445 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3446 return rc;
3447 cur &= 0xf;
3448
3449 last = cur;
3450 last_jiffies = jiffies;
3451
3452 while (1) {
3453 msleep(interval_msec);
3454 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3455 return rc;
3456 cur &= 0xf;
3457
3458 /* DET stable? */
3459 if (cur == last) {
3460 if (cur == 1 && time_before(jiffies, deadline))
3461 continue;
3462 if (time_after(jiffies, last_jiffies + duration))
3463 return 0;
3464 continue;
3465 }
3466
3467 /* unstable, start over */
3468 last = cur;
3469 last_jiffies = jiffies;
3470
3471 /* Check deadline. If debouncing failed, return
3472 * -EPIPE to tell upper layer to lower link speed.
3473 */
3474 if (time_after(jiffies, deadline))
3475 return -EPIPE;
3476 }
3477 }
3478
3479 /**
3480 * sata_link_resume - resume SATA link
3481 * @link: ATA link to resume SATA
3482 * @params: timing parameters { interval, duration, timeout } in msec
3483 * @deadline: deadline jiffies for the operation
3484 *
3485 * Resume SATA phy @link and debounce it.
3486 *
3487 * LOCKING:
3488 * Kernel thread context (may sleep)
3489 *
3490 * RETURNS:
3491 * 0 on success, -errno on failure.
3492 */
3493 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3494 unsigned long deadline)
3495 {
3496 u32 scontrol, serror;
3497 int rc;
3498
3499 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3500 return rc;
3501
3502 scontrol = (scontrol & 0x0f0) | 0x300;
3503
3504 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3505 return rc;
3506
3507 /* Some PHYs react badly if SStatus is pounded immediately
3508 * after resuming. Delay 200ms before debouncing.
3509 */
3510 msleep(200);
3511
3512 if ((rc = sata_link_debounce(link, params, deadline)))
3513 return rc;
3514
3515 /* Clear SError. PMP and some host PHYs require this to
3516 * operate and clearing should be done before checking PHY
3517 * online status to avoid race condition (hotplugging between
3518 * link resume and status check).
3519 */
3520 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3521 rc = sata_scr_write(link, SCR_ERROR, serror);
3522 if (rc == 0 || rc == -EINVAL) {
3523 unsigned long flags;
3524
3525 spin_lock_irqsave(link->ap->lock, flags);
3526 link->eh_info.serror = 0;
3527 spin_unlock_irqrestore(link->ap->lock, flags);
3528 rc = 0;
3529 }
3530 return rc;
3531 }
3532
3533 /**
3534 * ata_std_prereset - prepare for reset
3535 * @link: ATA link to be reset
3536 * @deadline: deadline jiffies for the operation
3537 *
3538 * @link is about to be reset. Initialize it. Failure from
3539 * prereset makes libata abort the whole reset sequence and give up
3540 * that port, so prereset should be best-effort. It does its
3541 * best to prepare for reset sequence but if things go wrong, it
3542 * should just whine, not fail.
3543 *
3544 * LOCKING:
3545 * Kernel thread context (may sleep)
3546 *
3547 * RETURNS:
3548 * 0 on success, -errno otherwise.
3549 */
3550 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3551 {
3552 struct ata_port *ap = link->ap;
3553 struct ata_eh_context *ehc = &link->eh_context;
3554 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3555 int rc;
3556
3557 /* if we're about to do hardreset, nothing more to do */
3558 if (ehc->i.action & ATA_EH_HARDRESET)
3559 return 0;
3560
3561 /* if SATA, resume link */
3562 if (ap->flags & ATA_FLAG_SATA) {
3563 rc = sata_link_resume(link, timing, deadline);
3564 /* whine about phy resume failure but proceed */
3565 if (rc && rc != -EOPNOTSUPP)
3566 ata_link_printk(link, KERN_WARNING, "failed to resume "
3567 "link for reset (errno=%d)\n", rc);
3568 }
3569
3570 return 0;
3571 }
3572
3573 /**
3574 * sata_link_hardreset - reset link via SATA phy reset
3575 * @link: link to reset
3576 * @timing: timing parameters { interval, duration, timeout } in msec
3577 * @deadline: deadline jiffies for the operation
3578 * @online: optional out parameter indicating link onlineness
3579 * @check_ready: optional callback to check link readiness
3580 *
3581 * SATA phy-reset @link using DET bits of SControl register.
3582 * After hardreset, link readiness is waited upon using
3583 * ata_wait_ready() if @check_ready is specified. LLDs are
3584 * allowed to not specify @check_ready and wait by themselves after this
3585 * function returns. Device classification is LLD's
3586 * responsibility.
3587 *
3588 * *@online is set to one iff reset succeeded and @link is online
3589 * after reset.
3590 *
3591 * LOCKING:
3592 * Kernel thread context (may sleep)
3593 *
3594 * RETURNS:
3595 * 0 on success, -errno otherwise.
3596 */
3597 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3598 unsigned long deadline,
3599 bool *online, int (*check_ready)(struct ata_link *))
3600 {
3601 u32 scontrol;
3602 int rc;
3603
3604 DPRINTK("ENTER\n");
3605
3606 if (online)
3607 *online = false;
3608
3609 if (sata_set_spd_needed(link)) {
3610 /* SATA spec says nothing about how to reconfigure
3611 * spd. To be on the safe side, turn off phy during
3612 * reconfiguration. This works for at least ICH7 AHCI
3613 * and Sil3124.
3614 */
3615 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3616 goto out;
3617
3618 scontrol = (scontrol & 0x0f0) | 0x304;
3619
3620 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3621 goto out;
3622
3623 sata_set_spd(link);
3624 }
3625
3626 /* issue phy wake/reset */
3627 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3628 goto out;
3629
3630 scontrol = (scontrol & 0x0f0) | 0x301;
3631
3632 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3633 goto out;
3634
3635 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3636 * 10.4.2 says at least 1 ms.
3637 */
3638 msleep(1);
3639
3640 /* bring link back */
3641 rc = sata_link_resume(link, timing, deadline);
3642 if (rc)
3643 goto out;
3644 /* if link is offline nothing more to do */
3645 if (ata_link_offline(link))
3646 goto out;
3647
3648 /* Link is online. From this point, -ENODEV too is an error. */
3649 if (online)
3650 *online = true;
3651
3652 if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link)) {
3653 /* If PMP is supported, we have to do follow-up SRST.
3654 * Some PMPs don't send D2H Reg FIS after hardreset if
3655 * the first port is empty. Wait only for
3656 * ATA_TMOUT_PMP_SRST_WAIT.
3657 */
3658 if (check_ready) {
3659 unsigned long pmp_deadline;
3660
3661 pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
3662 if (time_after(pmp_deadline, deadline))
3663 pmp_deadline = deadline;
3664 ata_wait_ready(link, pmp_deadline, check_ready);
3665 }
3666 rc = -EAGAIN;
3667 goto out;
3668 }
3669
3670 rc = 0;
3671 if (check_ready)
3672 rc = ata_wait_ready(link, deadline, check_ready);
3673 out:
3674 if (rc && rc != -EAGAIN)
3675 ata_link_printk(link, KERN_ERR,
3676 "COMRESET failed (errno=%d)\n", rc);
3677 DPRINTK("EXIT, rc=%d\n", rc);
3678 return rc;
3679 }
3680
3681 /**
3682 * sata_std_hardreset - COMRESET w/o waiting or classification
3683 * @link: link to reset
3684 * @class: resulting class of attached device
3685 * @deadline: deadline jiffies for the operation
3686 *
3687 * Standard SATA COMRESET w/o waiting or classification.
3688 *
3689 * LOCKING:
3690 * Kernel thread context (may sleep)
3691 *
3692 * RETURNS:
3693 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3694 */
3695 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3696 unsigned long deadline)
3697 {
3698 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3699 bool online;
3700 int rc;
3701
3702 /* do hardreset */
3703 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3704 return online ? -EAGAIN : rc;
3705 }
3706
3707 /**
3708 * ata_std_postreset - standard postreset callback
3709 * @link: the target ata_link
3710 * @classes: classes of attached devices
3711 *
3712 * This function is invoked after a successful reset. Note that
3713 * the device might have been reset more than once using
3714 * different reset methods before postreset is invoked.
3715 *
3716 * LOCKING:
3717 * Kernel thread context (may sleep)
3718 */
3719 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3720 {
3721 DPRINTK("ENTER\n");
3722
3723 /* print link status */
3724 sata_print_link_status(link);
3725
3726 DPRINTK("EXIT\n");
3727 }
3728
3729 /**
3730 * ata_dev_same_device - Determine whether new ID matches configured device
3731 * @dev: device to compare against
3732 * @new_class: class of the new device
3733 * @new_id: IDENTIFY page of the new device
3734 *
3735 * Compare @new_class and @new_id against @dev and determine
3736 * whether @dev is the device indicated by @new_class and
3737 * @new_id.
3738 *
3739 * LOCKING:
3740 * None.
3741 *
3742 * RETURNS:
3743 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3744 */
3745 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3746 const u16 *new_id)
3747 {
3748 const u16 *old_id = dev->id;
3749 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3750 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3751
3752 if (dev->class != new_class) {
3753 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3754 dev->class, new_class);
3755 return 0;
3756 }
3757
3758 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3759 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3760 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3761 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3762
3763 if (strcmp(model[0], model[1])) {
3764 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3765 "'%s' != '%s'\n", model[0], model[1]);
3766 return 0;
3767 }
3768
3769 if (strcmp(serial[0], serial[1])) {
3770 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3771 "'%s' != '%s'\n", serial[0], serial[1]);
3772 return 0;
3773 }
3774
3775 return 1;
3776 }
3777
3778 /**
3779 * ata_dev_reread_id - Re-read IDENTIFY data
3780 * @dev: target ATA device
3781 * @readid_flags: read ID flags
3782 *
3783 * Re-read IDENTIFY page and make sure @dev is still attached to
3784 * the port.
3785 *
3786 * LOCKING:
3787 * Kernel thread context (may sleep)
3788 *
3789 * RETURNS:
3790 * 0 on success, negative errno otherwise
3791 */
3792 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3793 {
3794 unsigned int class = dev->class;
3795 u16 *id = (void *)dev->link->ap->sector_buf;
3796 int rc;
3797
3798 /* read ID data */
3799 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3800 if (rc)
3801 return rc;
3802
3803 /* is the device still there? */
3804 if (!ata_dev_same_device(dev, class, id))
3805 return -ENODEV;
3806
3807 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3808 return 0;
3809 }
3810
3811 /**
3812 * ata_dev_revalidate - Revalidate ATA device
3813 * @dev: device to revalidate
3814 * @new_class: new class code
3815 * @readid_flags: read ID flags
3816 *
3817 * Re-read IDENTIFY page, make sure @dev is still attached to the
3818 * port and reconfigure it according to the new IDENTIFY page.
3819 *
3820 * LOCKING:
3821 * Kernel thread context (may sleep)
3822 *
3823 * RETURNS:
3824 * 0 on success, negative errno otherwise
3825 */
3826 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3827 unsigned int readid_flags)
3828 {
3829 u64 n_sectors = dev->n_sectors;
3830 int rc;
3831
3832 if (!ata_dev_enabled(dev))
3833 return -ENODEV;
3834
3835 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3836 if (ata_class_enabled(new_class) &&
3837 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3838 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3839 dev->class, new_class);
3840 rc = -ENODEV;
3841 goto fail;
3842 }
3843
3844 /* re-read ID */
3845 rc = ata_dev_reread_id(dev, readid_flags);
3846 if (rc)
3847 goto fail;
3848
3849 /* configure device according to the new ID */
3850 rc = ata_dev_configure(dev);
3851 if (rc)
3852 goto fail;
3853
3854 /* verify n_sectors hasn't changed */
3855 if (dev->class == ATA_DEV_ATA && n_sectors &&
3856 dev->n_sectors != n_sectors) {
3857 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3858 "%llu != %llu\n",
3859 (unsigned long long)n_sectors,
3860 (unsigned long long)dev->n_sectors);
3861
3862 /* restore original n_sectors */
3863 dev->n_sectors = n_sectors;
3864
3865 rc = -ENODEV;
3866 goto fail;
3867 }
3868
3869 return 0;
3870
3871 fail:
3872 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3873 return rc;
3874 }
3875
3876 struct ata_blacklist_entry {
3877 const char *model_num;
3878 const char *model_rev;
3879 unsigned long horkage;
3880 };
3881
3882 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3883 /* Devices with DMA related problems under Linux */
3884 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3885 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3886 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3887 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3888 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3889 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3890 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3891 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3892 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3893 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3894 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3895 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3896 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3897 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3898 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3899 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3900 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3901 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3902 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3903 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3904 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3905 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3906 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3907 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3908 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3909 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3910 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3911 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3912 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3913 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3914 /* Odd clown on sil3726/4726 PMPs */
3915 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3916 ATA_HORKAGE_SKIP_PM },
3917
3918 /* Weird ATAPI devices */
3919 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3920
3921 /* Devices we expect to fail diagnostics */
3922
3923 /* Devices where NCQ should be avoided */
3924 /* NCQ is slow */
3925 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3926 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3927 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3928 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3929 /* NCQ is broken */
3930 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3931 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3932 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3933 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3934
3935 /* Blacklist entries taken from Silicon Image 3124/3132
3936 Windows driver .inf file - also several Linux problem reports */
3937 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3938 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3939 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3940
3941 /* devices which puke on READ_NATIVE_MAX */
3942 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3943 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3944 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3945 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3946
3947 /* Devices which report 1 sector over size HPA */
3948 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3949 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3950 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
3951
3952 /* Devices which get the IVB wrong */
3953 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3954 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
3955 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
3956 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
3957 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
3958
3959 /* End Marker */
3960 { }
3961 };
3962
3963 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3964 {
3965 const char *p;
3966 int len;
3967
3968 /*
3969 * check for trailing wildcard: *\0
3970 */
3971 p = strchr(patt, wildchar);
3972 if (p && ((*(p + 1)) == 0))
3973 len = p - patt;
3974 else {
3975 len = strlen(name);
3976 if (!len) {
3977 if (!*patt)
3978 return 0;
3979 return -1;
3980 }
3981 }
3982
3983 return strncmp(patt, name, len);
3984 }
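/*
 * A few illustrative calls (not part of the original source): the helper
 * returns 0 on a match, and a trailing wildcard limits the comparison to
 * the characters before it, so against the table above
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*')	-> 0 (match)
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*')	-> 0 (match)
 *	strn_pattern_cmp("ST380817AS", "ST3160023AS", '*')	-> non-zero
 */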
3985
3986 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3987 {
3988 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3989 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3990 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3991
3992 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3993 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3994
3995 while (ad->model_num) {
3996 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3997 if (ad->model_rev == NULL)
3998 return ad->horkage;
3999 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4000 return ad->horkage;
4001 }
4002 ad++;
4003 }
4004 return 0;
4005 }
4006
4007 static int ata_dma_blacklisted(const struct ata_device *dev)
4008 {
4009 /* We don't support polling DMA.
4010	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
4011	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4012 */
4013 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4014 (dev->flags & ATA_DFLAG_CDB_INTR))
4015 return 1;
4016 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4017 }
4018
4019 /**
4020 * ata_is_40wire - check drive side detection
4021 * @dev: device
4022 *
4023 * Perform drive side detection decoding, allowing for device vendors
4024 * who can't follow the documentation.
4025 */
4026
4027 static int ata_is_40wire(struct ata_device *dev)
4028 {
4029 if (dev->horkage & ATA_HORKAGE_IVB)
4030 return ata_drive_40wire_relaxed(dev->id);
4031 return ata_drive_40wire(dev->id);
4032 }
4033
4034 /**
4035 * cable_is_40wire - 40/80/SATA decider
4036 * @ap: port to consider
4037 *
4038 * This function encapsulates the policy for speed management
4039 * in one place. At the moment we don't cache the result but
4040 * there is a good case for setting ap->cbl to the result when
4041 * we are called with unknown cables (and figuring out if it
4042 * impacts hotplug at all).
4043 *
4044 * Return 1 if the cable appears to be 40 wire.
4045 */
4046
4047 static int cable_is_40wire(struct ata_port *ap)
4048 {
4049 struct ata_link *link;
4050 struct ata_device *dev;
4051
4052 /* If the controller thinks we are 40 wire, we are */
4053 if (ap->cbl == ATA_CBL_PATA40)
4054 return 1;
4055 /* If the controller thinks we are 80 wire, we are */
4056 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4057 return 0;
4058	/* If the controller doesn't know, scan the drives.
4059
4060	   Note: we look for all-40-wire detects at this point.
4061	   Any 80-wire detect is taken to mean an 80-wire cable
4062	   because:
4063	   - in many setups only one drive (the slave, if present)
4064	     will give a valid detect, and
4065	   - a drive that cannot do detection should not be allowed
4066	     to colour the choice.
4067	*/
4068 ata_port_for_each_link(link, ap) {
4069 ata_link_for_each_dev(dev, link) {
4070 if (!ata_is_40wire(dev))
4071 return 0;
4072 }
4073 }
4074 return 1;
4075 }
4076
4077 /**
4078 * ata_dev_xfermask - Compute supported xfermask of the given device
4079 * @dev: Device to compute xfermask for
4080 *
4081 * Compute supported xfermask of @dev and store it in
4082 * dev->*_mask. This function is responsible for applying all
4083 * known limits including host controller limits, device
4084 * blacklist, etc...
4085 *
4086 * LOCKING:
4087 * None.
4088 */
4089 static void ata_dev_xfermask(struct ata_device *dev)
4090 {
4091 struct ata_link *link = dev->link;
4092 struct ata_port *ap = link->ap;
4093 struct ata_host *host = ap->host;
4094 unsigned long xfer_mask;
4095
4096 /* controller modes available */
4097 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4098 ap->mwdma_mask, ap->udma_mask);
4099
4100 /* drive modes available */
4101 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4102 dev->mwdma_mask, dev->udma_mask);
4103 xfer_mask &= ata_id_xfermask(dev->id);
4104
4105 /*
4106 * CFA Advanced TrueIDE timings are not allowed on a shared
4107 * cable
4108 */
4109 if (ata_dev_pair(dev)) {
4110 /* No PIO5 or PIO6 */
4111 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4112	/* No MWDMA3 or MWDMA4 */
4113 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4114 }
4115
4116 if (ata_dma_blacklisted(dev)) {
4117 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4118 ata_dev_printk(dev, KERN_WARNING,
4119 "device is on DMA blacklist, disabling DMA\n");
4120 }
4121
4122 if ((host->flags & ATA_HOST_SIMPLEX) &&
4123 host->simplex_claimed && host->simplex_claimed != ap) {
4124 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4125 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4126 "other device, disabling DMA\n");
4127 }
4128
4129 if (ap->flags & ATA_FLAG_NO_IORDY)
4130 xfer_mask &= ata_pio_mask_no_iordy(dev);
4131
4132 if (ap->ops->mode_filter)
4133 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4134
4135 /* Apply cable rule here. Don't apply it early because when
4136 * we handle hot plug the cable type can itself change.
4137 * Check this last so that we know if the transfer rate was
4138 * solely limited by the cable.
4139 * Unknown or 80 wire cables reported host side are checked
4140 * drive side as well. Cases where we know a 40wire cable
4141 * is used safely for 80 are not checked here.
4142 */
4143 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4144 /* UDMA/44 or higher would be available */
4145 if (cable_is_40wire(ap)) {
4146 ata_dev_printk(dev, KERN_WARNING,
4147 "limited to UDMA/33 due to 40-wire cable\n");
4148 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4149 }
4150
4151 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4152 &dev->mwdma_mask, &dev->udma_mask);
4153 }
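/*
 * Worked example of the cable rule above (illustrative only): UDMA modes
 * 0-7 occupy bits ATA_SHIFT_UDMA+0 .. ATA_SHIFT_UDMA+7 of the packed
 * xfermask, so (0xF8 << ATA_SHIFT_UDMA) covers UDMA/44 and faster. A drive
 * advertising up to UDMA/100 behind a 40-wire cable therefore keeps only
 * the UDMA/16-UDMA/33 bits once that mask is cleared, which is exactly
 * the "limited to UDMA/33" case reported above.
 */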
4154
4155 /**
4156 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4157 * @dev: Device to which command will be sent
4158 *
4159 * Issue SET FEATURES - XFER MODE command to device @dev
4160	 * on the port it is attached to.
4161 *
4162 * LOCKING:
4163 * PCI/etc. bus probe sem.
4164 *
4165 * RETURNS:
4166 * 0 on success, AC_ERR_* mask otherwise.
4167 */
4168
4169 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4170 {
4171 struct ata_taskfile tf;
4172 unsigned int err_mask;
4173
4174 /* set up set-features taskfile */
4175 DPRINTK("set features - xfer mode\n");
4176
4177 /* Some controllers and ATAPI devices show flaky interrupt
4178 * behavior after setting xfer mode. Use polling instead.
4179 */
4180 ata_tf_init(dev, &tf);
4181 tf.command = ATA_CMD_SET_FEATURES;
4182 tf.feature = SETFEATURES_XFER;
4183 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4184 tf.protocol = ATA_PROT_NODATA;
4185 /* If we are using IORDY we must send the mode setting command */
4186 if (ata_pio_need_iordy(dev))
4187 tf.nsect = dev->xfer_mode;
4188 /* If the device has IORDY and the controller does not - turn it off */
4189 else if (ata_id_has_iordy(dev->id))
4190 tf.nsect = 0x01;
4191 else /* In the ancient relic department - skip all of this */
4192 return 0;
4193
4194 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4195
4196 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4197 return err_mask;
4198 }
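/*
 * Sketch of what ends up on the wire (illustrative): for a drive configured
 * for UDMA/100, dev->xfer_mode is XFER_UDMA_5 (0x45), so the SET FEATURES -
 * XFER MODE subcommand receives 0x45 in the sector count register. For a
 * PIO-only setup where the controller lacks IORDY, 0x01 (PIO default mode,
 * disable IORDY) is sent instead.
 */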
4199 /**
4200 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4201 * @dev: Device to which command will be sent
4202 * @enable: Whether to enable or disable the feature
4203	 * @feature: The feature to set, passed in the sector count field
4204	 *
4205	 * Issue SET FEATURES - SATA FEATURES command to device @dev,
4206	 * passing @feature in the sector count register.
4207 *
4208 * LOCKING:
4209 * PCI/etc. bus probe sem.
4210 *
4211 * RETURNS:
4212 * 0 on success, AC_ERR_* mask otherwise.
4213 */
4214 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4215 u8 feature)
4216 {
4217 struct ata_taskfile tf;
4218 unsigned int err_mask;
4219
4220 /* set up set-features taskfile */
4221 DPRINTK("set features - SATA features\n");
4222
4223 ata_tf_init(dev, &tf);
4224 tf.command = ATA_CMD_SET_FEATURES;
4225 tf.feature = enable;
4226 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4227 tf.protocol = ATA_PROT_NODATA;
4228 tf.nsect = feature;
4229
4230 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4231
4232 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4233 return err_mask;
4234 }
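/*
 * Typical call, as a minimal sketch (mirrors how the DIPM link power
 * feature is enabled elsewhere in libata; constants are from <linux/ata.h>):
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING,
 *			       "failed to enable DIPM (err_mask=0x%x)\n",
 *			       err_mask);
 */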
4235
4236 /**
4237 * ata_dev_init_params - Issue INIT DEV PARAMS command
4238 * @dev: Device to which command will be sent
4239 * @heads: Number of heads (taskfile parameter)
4240 * @sectors: Number of sectors (taskfile parameter)
4241 *
4242 * LOCKING:
4243 * Kernel thread context (may sleep)
4244 *
4245 * RETURNS:
4246 * 0 on success, AC_ERR_* mask otherwise.
4247 */
4248 static unsigned int ata_dev_init_params(struct ata_device *dev,
4249 u16 heads, u16 sectors)
4250 {
4251 struct ata_taskfile tf;
4252 unsigned int err_mask;
4253
4254 /* Number of sectors per track 1-255. Number of heads 1-16 */
4255 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4256 return AC_ERR_INVALID;
4257
4258 /* set up init dev params taskfile */
4259 DPRINTK("init dev params \n");
4260
4261 ata_tf_init(dev, &tf);
4262 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4263 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4264 tf.protocol = ATA_PROT_NODATA;
4265 tf.nsect = sectors;
4266 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4267
4268 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4269 /* A clean abort indicates an original or just out of spec drive
4270 and we should continue as we issue the setup based on the
4271 drive reported working geometry */
4272 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4273 err_mask = 0;
4274
4275 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4276 return err_mask;
4277 }
4278
4279 /**
4280 * ata_sg_clean - Unmap DMA memory associated with command
4281 * @qc: Command containing DMA memory to be released
4282 *
4283 * Unmap all mapped DMA memory associated with this command.
4284 *
4285 * LOCKING:
4286 * spin_lock_irqsave(host lock)
4287 */
4288 void ata_sg_clean(struct ata_queued_cmd *qc)
4289 {
4290 struct ata_port *ap = qc->ap;
4291 struct scatterlist *sg = qc->sg;
4292 int dir = qc->dma_dir;
4293
4294 WARN_ON(sg == NULL);
4295
4296 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4297
4298 if (qc->n_elem)
4299 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4300
4301 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4302 qc->sg = NULL;
4303 }
4304
4305 /**
4306 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4307 * @qc: Metadata associated with taskfile to check
4308 *
4309 * Allow low-level driver to filter ATA PACKET commands, returning
4310 * a status indicating whether or not it is OK to use DMA for the
4311 * supplied PACKET command.
4312 *
4313 * LOCKING:
4314 * spin_lock_irqsave(host lock)
4315 *
4316 * RETURNS: 0 when ATAPI DMA can be used
4317 * nonzero otherwise
4318 */
4319 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4320 {
4321 struct ata_port *ap = qc->ap;
4322
4323 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4324 * few ATAPI devices choke on such DMA requests.
4325 */
4326 if (unlikely(qc->nbytes & 15))
4327 return 1;
4328
4329 if (ap->ops->check_atapi_dma)
4330 return ap->ops->check_atapi_dma(qc);
4331
4332 return 0;
4333 }
4334
4335 /**
4336 * ata_std_qc_defer - Check whether a qc needs to be deferred
4337 * @qc: ATA command in question
4338 *
4339 * Non-NCQ commands cannot run with any other command, NCQ or
4340 * not. As upper layer only knows the queue depth, we are
4341 * responsible for maintaining exclusion. This function checks
4342 * whether a new command @qc can be issued.
4343 *
4344 * LOCKING:
4345 * spin_lock_irqsave(host lock)
4346 *
4347 * RETURNS:
4348 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4349 */
4350 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4351 {
4352 struct ata_link *link = qc->dev->link;
4353
4354 if (qc->tf.protocol == ATA_PROT_NCQ) {
4355 if (!ata_tag_valid(link->active_tag))
4356 return 0;
4357 } else {
4358 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4359 return 0;
4360 }
4361
4362 return ATA_DEFER_LINK;
4363 }
4364
4365 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4366
4367 /**
4368 * ata_sg_init - Associate command with scatter-gather table.
4369 * @qc: Command to be associated
4370 * @sg: Scatter-gather table.
4371 * @n_elem: Number of elements in s/g table.
4372 *
4373 * Initialize the data-related elements of queued_cmd @qc
4374 * to point to a scatter-gather table @sg, containing @n_elem
4375 * elements.
4376 *
4377 * LOCKING:
4378 * spin_lock_irqsave(host lock)
4379 */
4380 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4381 unsigned int n_elem)
4382 {
4383 qc->sg = sg;
4384 qc->n_elem = n_elem;
4385 qc->cursg = qc->sg;
4386 }
4387
4388 /**
4389 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4390 * @qc: Command with scatter-gather table to be mapped.
4391 *
4392 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4393 *
4394 * LOCKING:
4395 * spin_lock_irqsave(host lock)
4396 *
4397 * RETURNS:
4398 * Zero on success, negative on error.
4399 *
4400 */
4401 static int ata_sg_setup(struct ata_queued_cmd *qc)
4402 {
4403 struct ata_port *ap = qc->ap;
4404 unsigned int n_elem;
4405
4406 VPRINTK("ENTER, ata%u\n", ap->print_id);
4407
4408 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4409 if (n_elem < 1)
4410 return -1;
4411
4412 DPRINTK("%d sg elements mapped\n", n_elem);
4413
4414 qc->n_elem = n_elem;
4415 qc->flags |= ATA_QCFLAG_DMAMAP;
4416
4417 return 0;
4418 }
4419
4420 /**
4421 * swap_buf_le16 - swap halves of 16-bit words in place
4422 * @buf: Buffer to swap
4423 * @buf_words: Number of 16-bit words in buffer.
4424 *
4425 * Swap halves of 16-bit words if needed to convert from
4426 * little-endian byte order to native cpu byte order, or
4427 * vice-versa.
4428 *
4429 * LOCKING:
4430 * Inherited from caller.
4431 */
4432 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4433 {
4434 #ifdef __BIG_ENDIAN
4435 unsigned int i;
4436
4437 for (i = 0; i < buf_words; i++)
4438 buf[i] = le16_to_cpu(buf[i]);
4439 #endif /* __BIG_ENDIAN */
4440 }
4441
4442 /**
4443 * ata_qc_new - Request an available ATA command, for queueing
4444	 * @ap: Port to allocate the command for
4446 *
4447 * LOCKING:
4448 * None.
4449 */
4450
4451 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4452 {
4453 struct ata_queued_cmd *qc = NULL;
4454 unsigned int i;
4455
4456 /* no command while frozen */
4457 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4458 return NULL;
4459
4460 /* the last tag is reserved for internal command. */
4461 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4462 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4463 qc = __ata_qc_from_tag(ap, i);
4464 break;
4465 }
4466
4467 if (qc)
4468 qc->tag = i;
4469
4470 return qc;
4471 }
4472
4473 /**
4474 * ata_qc_new_init - Request an available ATA command, and initialize it
4475 * @dev: Device from whom we request an available command structure
4476 *
4477 * LOCKING:
4478 * None.
4479 */
4480
4481 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4482 {
4483 struct ata_port *ap = dev->link->ap;
4484 struct ata_queued_cmd *qc;
4485
4486 qc = ata_qc_new(ap);
4487 if (qc) {
4488 qc->scsicmd = NULL;
4489 qc->ap = ap;
4490 qc->dev = dev;
4491
4492 ata_qc_reinit(qc);
4493 }
4494
4495 return qc;
4496 }
4497
4498 /**
4499 * ata_qc_free - free unused ata_queued_cmd
4500	 *	@qc: Command to free
4501 *
4502 * Designed to free unused ata_queued_cmd object
4503 * in case something prevents using it.
4504 *
4505 * LOCKING:
4506 * spin_lock_irqsave(host lock)
4507 */
4508 void ata_qc_free(struct ata_queued_cmd *qc)
4509 {
4510 struct ata_port *ap = qc->ap;
4511 unsigned int tag;
4512
4513 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4514
4515 qc->flags = 0;
4516 tag = qc->tag;
4517 if (likely(ata_tag_valid(tag))) {
4518 qc->tag = ATA_TAG_POISON;
4519 clear_bit(tag, &ap->qc_allocated);
4520 }
4521 }
4522
4523 void __ata_qc_complete(struct ata_queued_cmd *qc)
4524 {
4525 struct ata_port *ap = qc->ap;
4526 struct ata_link *link = qc->dev->link;
4527
4528 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4529 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4530
4531 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4532 ata_sg_clean(qc);
4533
4534 /* command should be marked inactive atomically with qc completion */
4535 if (qc->tf.protocol == ATA_PROT_NCQ) {
4536 link->sactive &= ~(1 << qc->tag);
4537 if (!link->sactive)
4538 ap->nr_active_links--;
4539 } else {
4540 link->active_tag = ATA_TAG_POISON;
4541 ap->nr_active_links--;
4542 }
4543
4544 /* clear exclusive status */
4545 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4546 ap->excl_link == link))
4547 ap->excl_link = NULL;
4548
4549 /* atapi: mark qc as inactive to prevent the interrupt handler
4550 * from completing the command twice later, before the error handler
4551 * is called. (when rc != 0 and atapi request sense is needed)
4552 */
4553 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4554 ap->qc_active &= ~(1 << qc->tag);
4555
4556 /* call completion callback */
4557 qc->complete_fn(qc);
4558 }
4559
4560 static void fill_result_tf(struct ata_queued_cmd *qc)
4561 {
4562 struct ata_port *ap = qc->ap;
4563
4564 qc->result_tf.flags = qc->tf.flags;
4565 ap->ops->sff_tf_read(ap, &qc->result_tf);
4566 }
4567
4568 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4569 {
4570 struct ata_device *dev = qc->dev;
4571
4572 if (ata_tag_internal(qc->tag))
4573 return;
4574
4575 if (ata_is_nodata(qc->tf.protocol))
4576 return;
4577
4578 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4579 return;
4580
4581 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4582 }
4583
4584 /**
4585 * ata_qc_complete - Complete an active ATA command
4586	 *	@qc: Command to complete
4588 *
4589 * Indicate to the mid and upper layers that an ATA
4590 * command has completed, with either an ok or not-ok status.
4591 *
4592 * LOCKING:
4593 * spin_lock_irqsave(host lock)
4594 */
4595 void ata_qc_complete(struct ata_queued_cmd *qc)
4596 {
4597 struct ata_port *ap = qc->ap;
4598
4599 /* XXX: New EH and old EH use different mechanisms to
4600 * synchronize EH with regular execution path.
4601 *
4602 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4603 * Normal execution path is responsible for not accessing a
4604 * failed qc. libata core enforces the rule by returning NULL
4605 * from ata_qc_from_tag() for failed qcs.
4606 *
4607 * Old EH depends on ata_qc_complete() nullifying completion
4608 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4609 * not synchronize with interrupt handler. Only PIO task is
4610 * taken care of.
4611 */
4612 if (ap->ops->error_handler) {
4613 struct ata_device *dev = qc->dev;
4614 struct ata_eh_info *ehi = &dev->link->eh_info;
4615
4616 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4617
4618 if (unlikely(qc->err_mask))
4619 qc->flags |= ATA_QCFLAG_FAILED;
4620
4621 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4622 if (!ata_tag_internal(qc->tag)) {
4623 /* always fill result TF for failed qc */
4624 fill_result_tf(qc);
4625 ata_qc_schedule_eh(qc);
4626 return;
4627 }
4628 }
4629
4630 /* read result TF if requested */
4631 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4632 fill_result_tf(qc);
4633
4634 /* Some commands need post-processing after successful
4635 * completion.
4636 */
4637 switch (qc->tf.command) {
4638 case ATA_CMD_SET_FEATURES:
4639 if (qc->tf.feature != SETFEATURES_WC_ON &&
4640 qc->tf.feature != SETFEATURES_WC_OFF)
4641 break;
4642 /* fall through */
4643 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4644 case ATA_CMD_SET_MULTI: /* multi_count changed */
4645 /* revalidate device */
4646 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4647 ata_port_schedule_eh(ap);
4648 break;
4649
4650 case ATA_CMD_SLEEP:
4651 dev->flags |= ATA_DFLAG_SLEEPING;
4652 break;
4653 }
4654
4655 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4656 ata_verify_xfer(qc);
4657
4658 __ata_qc_complete(qc);
4659 } else {
4660 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4661 return;
4662
4663 /* read result TF if failed or requested */
4664 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4665 fill_result_tf(qc);
4666
4667 __ata_qc_complete(qc);
4668 }
4669 }
4670
4671 /**
4672 * ata_qc_complete_multiple - Complete multiple qcs successfully
4673 * @ap: port in question
4674 * @qc_active: new qc_active mask
4675 * @finish_qc: LLDD callback invoked before completing a qc
4676 *
4677	 *	Complete in-flight commands. This function is meant to be
4678	 *	called from the low-level driver's interrupt routine to complete
4679	 *	requests normally. ap->qc_active and @qc_active are compared
4680	 *	and commands are completed accordingly.
4681 *
4682 * LOCKING:
4683 * spin_lock_irqsave(host lock)
4684 *
4685 * RETURNS:
4686 * Number of completed commands on success, -errno otherwise.
4687 */
4688 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4689 void (*finish_qc)(struct ata_queued_cmd *))
4690 {
4691 int nr_done = 0;
4692 u32 done_mask;
4693 int i;
4694
4695 done_mask = ap->qc_active ^ qc_active;
4696
4697 if (unlikely(done_mask & qc_active)) {
4698 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4699 "(%08x->%08x)\n", ap->qc_active, qc_active);
4700 return -EINVAL;
4701 }
4702
4703 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4704 struct ata_queued_cmd *qc;
4705
4706 if (!(done_mask & (1 << i)))
4707 continue;
4708
4709 if ((qc = ata_qc_from_tag(ap, i))) {
4710 if (finish_qc)
4711 finish_qc(qc);
4712 ata_qc_complete(qc);
4713 nr_done++;
4714 }
4715 }
4716
4717 return nr_done;
4718 }
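/*
 * Minimal usage sketch (hypothetical LLDD interrupt path; the my_* name is
 * invented for illustration): the driver reads which tags its controller
 * still considers active and hands the new mask to
 * ata_qc_complete_multiple(), which completes every qc whose bit dropped
 * out of ap->qc_active:
 *
 *	u32 qc_active = my_read_active_tags(ap);	// hw-specific
 *
 *	if (ata_qc_complete_multiple(ap, qc_active, NULL) < 0) {
 *		// illegal transition - freeze and let EH sort it out
 *		ata_port_freeze(ap);
 *	}
 */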
4719
4720 /**
4721 * ata_qc_issue - issue taskfile to device
4722 * @qc: command to issue to device
4723 *
4724	 *	Prepare an ATA command for submission to a device.
4725 * This includes mapping the data into a DMA-able
4726 * area, filling in the S/G table, and finally
4727 * writing the taskfile to hardware, starting the command.
4728 *
4729 * LOCKING:
4730 * spin_lock_irqsave(host lock)
4731 */
4732 void ata_qc_issue(struct ata_queued_cmd *qc)
4733 {
4734 struct ata_port *ap = qc->ap;
4735 struct ata_link *link = qc->dev->link;
4736 u8 prot = qc->tf.protocol;
4737
4738 /* Make sure only one non-NCQ command is outstanding. The
4739 * check is skipped for old EH because it reuses active qc to
4740 * request ATAPI sense.
4741 */
4742 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4743
4744 if (ata_is_ncq(prot)) {
4745 WARN_ON(link->sactive & (1 << qc->tag));
4746
4747 if (!link->sactive)
4748 ap->nr_active_links++;
4749 link->sactive |= 1 << qc->tag;
4750 } else {
4751 WARN_ON(link->sactive);
4752
4753 ap->nr_active_links++;
4754 link->active_tag = qc->tag;
4755 }
4756
4757 qc->flags |= ATA_QCFLAG_ACTIVE;
4758 ap->qc_active |= 1 << qc->tag;
4759
4760 /* We guarantee to LLDs that they will have at least one
4761 * non-zero sg if the command is a data command.
4762 */
4763 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4764
4765 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4766 (ap->flags & ATA_FLAG_PIO_DMA)))
4767 if (ata_sg_setup(qc))
4768 goto sg_err;
4769
4770 /* if device is sleeping, schedule reset and abort the link */
4771 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4772 link->eh_info.action |= ATA_EH_RESET;
4773 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4774 ata_link_abort(link);
4775 return;
4776 }
4777
4778 ap->ops->qc_prep(qc);
4779
4780 qc->err_mask |= ap->ops->qc_issue(qc);
4781 if (unlikely(qc->err_mask))
4782 goto err;
4783 return;
4784
4785 sg_err:
4786 qc->err_mask |= AC_ERR_SYSTEM;
4787 err:
4788 ata_qc_complete(qc);
4789 }
4790
4791 /**
4792 * sata_scr_valid - test whether SCRs are accessible
4793 * @link: ATA link to test SCR accessibility for
4794 *
4795 * Test whether SCRs are accessible for @link.
4796 *
4797 * LOCKING:
4798 * None.
4799 *
4800 * RETURNS:
4801 * 1 if SCRs are accessible, 0 otherwise.
4802 */
4803 int sata_scr_valid(struct ata_link *link)
4804 {
4805 struct ata_port *ap = link->ap;
4806
4807 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4808 }
4809
4810 /**
4811 * sata_scr_read - read SCR register of the specified port
4812 * @link: ATA link to read SCR for
4813 * @reg: SCR to read
4814 * @val: Place to store read value
4815 *
4816 * Read SCR register @reg of @link into *@val. This function is
4817 * guaranteed to succeed if @link is ap->link, the cable type of
4818 * the port is SATA and the port implements ->scr_read.
4819 *
4820 * LOCKING:
4821 * None if @link is ap->link. Kernel thread context otherwise.
4822 *
4823 * RETURNS:
4824 * 0 on success, negative errno on failure.
4825 */
4826 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4827 {
4828 if (ata_is_host_link(link)) {
4829 struct ata_port *ap = link->ap;
4830
4831 if (sata_scr_valid(link))
4832 return ap->ops->scr_read(ap, reg, val);
4833 return -EOPNOTSUPP;
4834 }
4835
4836 return sata_pmp_scr_read(link, reg, val);
4837 }
4838
4839 /**
4840 * sata_scr_write - write SCR register of the specified port
4841 * @link: ATA link to write SCR for
4842 * @reg: SCR to write
4843 * @val: value to write
4844 *
4845 * Write @val to SCR register @reg of @link. This function is
4846 * guaranteed to succeed if @link is ap->link, the cable type of
4847	 *	the port is SATA and the port implements ->scr_write.
4848 *
4849 * LOCKING:
4850 * None if @link is ap->link. Kernel thread context otherwise.
4851 *
4852 * RETURNS:
4853 * 0 on success, negative errno on failure.
4854 */
4855 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4856 {
4857 if (ata_is_host_link(link)) {
4858 struct ata_port *ap = link->ap;
4859
4860 if (sata_scr_valid(link))
4861 return ap->ops->scr_write(ap, reg, val);
4862 return -EOPNOTSUPP;
4863 }
4864
4865 return sata_pmp_scr_write(link, reg, val);
4866 }
4867
4868 /**
4869 * sata_scr_write_flush - write SCR register of the specified port and flush
4870 * @link: ATA link to write SCR for
4871 * @reg: SCR to write
4872 * @val: value to write
4873 *
4874 * This function is identical to sata_scr_write() except that this
4875	 *	function performs a flush after writing to the register.
4876 *
4877 * LOCKING:
4878 * None if @link is ap->link. Kernel thread context otherwise.
4879 *
4880 * RETURNS:
4881 * 0 on success, negative errno on failure.
4882 */
4883 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4884 {
4885 if (ata_is_host_link(link)) {
4886 struct ata_port *ap = link->ap;
4887 int rc;
4888
4889 if (sata_scr_valid(link)) {
4890 rc = ap->ops->scr_write(ap, reg, val);
4891 if (rc == 0)
4892 rc = ap->ops->scr_read(ap, reg, &val);
4893 return rc;
4894 }
4895 return -EOPNOTSUPP;
4896 }
4897
4898 return sata_pmp_scr_write(link, reg, val);
4899 }
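/*
 * Illustrative read-modify-write of an SCR register (a sketch, not taken
 * from this file): limit a link to 1.5Gbps by updating the SPD field
 * (bits 7:4) of SControl and flushing the write:
 *
 *	u32 scontrol;
 *
 *	if (sata_scr_read(link, SCR_CONTROL, &scontrol) == 0) {
 *		scontrol = (scontrol & ~0xf0) | (1 << 4);  // SPD = Gen1 limit
 *		sata_scr_write_flush(link, SCR_CONTROL, scontrol);
 *	}
 */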
4900
4901 /**
4902 * ata_link_online - test whether the given link is online
4903 * @link: ATA link to test
4904 *
4905 * Test whether @link is online. Note that this function returns
4906 * 0 if online status of @link cannot be obtained, so
4907 * ata_link_online(link) != !ata_link_offline(link).
4908 *
4909 * LOCKING:
4910 * None.
4911 *
4912 * RETURNS:
4913 * 1 if the port online status is available and online.
4914 */
4915 int ata_link_online(struct ata_link *link)
4916 {
4917 u32 sstatus;
4918
4919 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4920 (sstatus & 0xf) == 0x3)
4921 return 1;
4922 return 0;
4923 }
4924
4925 /**
4926 * ata_link_offline - test whether the given link is offline
4927 * @link: ATA link to test
4928 *
4929 * Test whether @link is offline. Note that this function
4930 * returns 0 if offline status of @link cannot be obtained, so
4931 * ata_link_online(link) != !ata_link_offline(link).
4932 *
4933 * LOCKING:
4934 * None.
4935 *
4936 * RETURNS:
4937 * 1 if the port offline status is available and offline.
4938 */
4939 int ata_link_offline(struct ata_link *link)
4940 {
4941 u32 sstatus;
4942
4943 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4944 (sstatus & 0xf) != 0x3)
4945 return 1;
4946 return 0;
4947 }
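/*
 * Worked example of the DET decoding above (illustrative): an SStatus value
 * of 0x123 has DET = 0x3 (device present, phy communication established),
 * so ata_link_online() returns 1; SStatus 0x000 or 0x004 has DET != 0x3,
 * so ata_link_offline() returns 1 instead.
 */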
4948
4949 #ifdef CONFIG_PM
4950 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4951 unsigned int action, unsigned int ehi_flags,
4952 int wait)
4953 {
4954 unsigned long flags;
4955 int i, rc;
4956
4957 for (i = 0; i < host->n_ports; i++) {
4958 struct ata_port *ap = host->ports[i];
4959 struct ata_link *link;
4960
4961 /* Previous resume operation might still be in
4962 * progress. Wait for PM_PENDING to clear.
4963 */
4964 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4965 ata_port_wait_eh(ap);
4966 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4967 }
4968
4969 /* request PM ops to EH */
4970 spin_lock_irqsave(ap->lock, flags);
4971
4972 ap->pm_mesg = mesg;
4973 if (wait) {
4974 rc = 0;
4975 ap->pm_result = &rc;
4976 }
4977
4978 ap->pflags |= ATA_PFLAG_PM_PENDING;
4979 __ata_port_for_each_link(link, ap) {
4980 link->eh_info.action |= action;
4981 link->eh_info.flags |= ehi_flags;
4982 }
4983
4984 ata_port_schedule_eh(ap);
4985
4986 spin_unlock_irqrestore(ap->lock, flags);
4987
4988 /* wait and check result */
4989 if (wait) {
4990 ata_port_wait_eh(ap);
4991 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4992 if (rc)
4993 return rc;
4994 }
4995 }
4996
4997 return 0;
4998 }
4999
5000 /**
5001 * ata_host_suspend - suspend host
5002 * @host: host to suspend
5003 * @mesg: PM message
5004 *
5005 * Suspend @host. Actual operation is performed by EH. This
5006 * function requests EH to perform PM operations and waits for EH
5007 * to finish.
5008 *
5009 * LOCKING:
5010 * Kernel thread context (may sleep).
5011 *
5012 * RETURNS:
5013 * 0 on success, -errno on failure.
5014 */
5015 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5016 {
5017 int rc;
5018
5019 /*
5020 * disable link pm on all ports before requesting
5021 * any pm activity
5022 */
5023 ata_lpm_enable(host);
5024
5025 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5026 if (rc == 0)
5027 host->dev->power.power_state = mesg;
5028 return rc;
5029 }
5030
5031 /**
5032 * ata_host_resume - resume host
5033 * @host: host to resume
5034 *
5035 * Resume @host. Actual operation is performed by EH. This
5036 * function requests EH to perform PM operations and returns.
5037	 *	Note that all resume operations are performed in parallel.
5038 *
5039 * LOCKING:
5040 * Kernel thread context (may sleep).
5041 */
5042 void ata_host_resume(struct ata_host *host)
5043 {
5044 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5045 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5046 host->dev->power.power_state = PMSG_ON;
5047
5048 /* reenable link pm */
5049 ata_lpm_disable(host);
5050 }
5051 #endif
5052
5053 /**
5054 * ata_port_start - Set port up for dma.
5055 * @ap: Port to initialize
5056 *
5057 * Called just after data structures for each port are
5058 * initialized. Allocates space for PRD table.
5059 *
5060 * May be used as the port_start() entry in ata_port_operations.
5061 *
5062 * LOCKING:
5063 * Inherited from caller.
5064 */
5065 int ata_port_start(struct ata_port *ap)
5066 {
5067 struct device *dev = ap->dev;
5068
5069 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5070 GFP_KERNEL);
5071 if (!ap->prd)
5072 return -ENOMEM;
5073
5074 return 0;
5075 }
5076
5077 /**
5078 * ata_dev_init - Initialize an ata_device structure
5079 * @dev: Device structure to initialize
5080 *
5081 * Initialize @dev in preparation for probing.
5082 *
5083 * LOCKING:
5084 * Inherited from caller.
5085 */
5086 void ata_dev_init(struct ata_device *dev)
5087 {
5088 struct ata_link *link = dev->link;
5089 struct ata_port *ap = link->ap;
5090 unsigned long flags;
5091
5092 /* SATA spd limit is bound to the first device */
5093 link->sata_spd_limit = link->hw_sata_spd_limit;
5094 link->sata_spd = 0;
5095
5096 /* High bits of dev->flags are used to record warm plug
5097 * requests which occur asynchronously. Synchronize using
5098 * host lock.
5099 */
5100 spin_lock_irqsave(ap->lock, flags);
5101 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5102 dev->horkage = 0;
5103 spin_unlock_irqrestore(ap->lock, flags);
5104
5105 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5106 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5107 dev->pio_mask = UINT_MAX;
5108 dev->mwdma_mask = UINT_MAX;
5109 dev->udma_mask = UINT_MAX;
5110 }
5111
5112 /**
5113 * ata_link_init - Initialize an ata_link structure
5114 * @ap: ATA port link is attached to
5115 * @link: Link structure to initialize
5116 * @pmp: Port multiplier port number
5117 *
5118 * Initialize @link.
5119 *
5120 * LOCKING:
5121 * Kernel thread context (may sleep)
5122 */
5123 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5124 {
5125 int i;
5126
5127 /* clear everything except for devices */
5128 memset(link, 0, offsetof(struct ata_link, device[0]));
5129
5130 link->ap = ap;
5131 link->pmp = pmp;
5132 link->active_tag = ATA_TAG_POISON;
5133 link->hw_sata_spd_limit = UINT_MAX;
5134
5135 /* can't use iterator, ap isn't initialized yet */
5136 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5137 struct ata_device *dev = &link->device[i];
5138
5139 dev->link = link;
5140 dev->devno = dev - link->device;
5141 ata_dev_init(dev);
5142 }
5143 }
5144
5145 /**
5146 * sata_link_init_spd - Initialize link->sata_spd_limit
5147 * @link: Link to configure sata_spd_limit for
5148 *
5149 * Initialize @link->[hw_]sata_spd_limit to the currently
5150 * configured value.
5151 *
5152 * LOCKING:
5153 * Kernel thread context (may sleep).
5154 *
5155 * RETURNS:
5156 * 0 on success, -errno on failure.
5157 */
5158 int sata_link_init_spd(struct ata_link *link)
5159 {
5160 u32 scontrol;
5161 u8 spd;
5162 int rc;
5163
5164 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5165 if (rc)
5166 return rc;
5167
5168 spd = (scontrol >> 4) & 0xf;
5169 if (spd)
5170 link->hw_sata_spd_limit &= (1 << spd) - 1;
5171
5172 ata_force_spd_limit(link);
5173
5174 link->sata_spd_limit = link->hw_sata_spd_limit;
5175
5176 return 0;
5177 }
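/*
 * Worked example (illustrative): if SControl reads 0x120, the SPD field
 * ((scontrol >> 4) & 0xf) is 2, i.e. the link is administratively limited
 * to 3.0Gbps, and hw_sata_spd_limit is masked down to (1 << 2) - 1 = 0x3
 * (1.5Gbps and 3.0Gbps allowed). An SPD field of 0 means "no limit" and
 * leaves the mask untouched.
 */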
5178
5179 /**
5180 * ata_port_alloc - allocate and initialize basic ATA port resources
5181 * @host: ATA host this allocated port belongs to
5182 *
5183 * Allocate and initialize basic ATA port resources.
5184 *
5185 * RETURNS:
5186	 *	Allocated ATA port on success, NULL on failure.
5187 *
5188 * LOCKING:
5189 * Inherited from calling layer (may sleep).
5190 */
5191 struct ata_port *ata_port_alloc(struct ata_host *host)
5192 {
5193 struct ata_port *ap;
5194
5195 DPRINTK("ENTER\n");
5196
5197 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5198 if (!ap)
5199 return NULL;
5200
5201 ap->pflags |= ATA_PFLAG_INITIALIZING;
5202 ap->lock = &host->lock;
5203 ap->flags = ATA_FLAG_DISABLED;
5204 ap->print_id = -1;
5205 ap->ctl = ATA_DEVCTL_OBS;
5206 ap->host = host;
5207 ap->dev = host->dev;
5208 ap->last_ctl = 0xFF;
5209
5210 #if defined(ATA_VERBOSE_DEBUG)
5211 /* turn on all debugging levels */
5212 ap->msg_enable = 0x00FF;
5213 #elif defined(ATA_DEBUG)
5214 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5215 #else
5216 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5217 #endif
5218
5219 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5220 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5221 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5222 INIT_LIST_HEAD(&ap->eh_done_q);
5223 init_waitqueue_head(&ap->eh_wait_q);
5224 init_timer_deferrable(&ap->fastdrain_timer);
5225 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5226 ap->fastdrain_timer.data = (unsigned long)ap;
5227
5228 ap->cbl = ATA_CBL_NONE;
5229
5230 ata_link_init(ap, &ap->link, 0);
5231
5232 #ifdef ATA_IRQ_TRAP
5233 ap->stats.unhandled_irq = 1;
5234 ap->stats.idle_irq = 1;
5235 #endif
5236 return ap;
5237 }
5238
5239 static void ata_host_release(struct device *gendev, void *res)
5240 {
5241 struct ata_host *host = dev_get_drvdata(gendev);
5242 int i;
5243
5244 for (i = 0; i < host->n_ports; i++) {
5245 struct ata_port *ap = host->ports[i];
5246
5247 if (!ap)
5248 continue;
5249
5250 if (ap->scsi_host)
5251 scsi_host_put(ap->scsi_host);
5252
5253 kfree(ap->pmp_link);
5254 kfree(ap);
5255 host->ports[i] = NULL;
5256 }
5257
5258 dev_set_drvdata(gendev, NULL);
5259 }
5260
5261 /**
5262 * ata_host_alloc - allocate and init basic ATA host resources
5263 * @dev: generic device this host is associated with
5264 * @max_ports: maximum number of ATA ports associated with this host
5265 *
5266	 *	Allocate and initialize basic ATA host resources. An LLD calls
5267	 *	this function to allocate a host, then initializes it fully and
5268	 *	attaches it using ata_host_register().
5269 *
5270 * @max_ports ports are allocated and host->n_ports is
5271 * initialized to @max_ports. The caller is allowed to decrease
5272 * host->n_ports before calling ata_host_register(). The unused
5273 * ports will be automatically freed on registration.
5274 *
5275 * RETURNS:
5276	 *	Allocated ATA host on success, NULL on failure.
5277 *
5278 * LOCKING:
5279 * Inherited from calling layer (may sleep).
5280 */
5281 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5282 {
5283 struct ata_host *host;
5284 size_t sz;
5285 int i;
5286
5287 DPRINTK("ENTER\n");
5288
5289 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5290 return NULL;
5291
5292 /* alloc a container for our list of ATA ports (buses) */
5293 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5295 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5296 if (!host)
5297 goto err_out;
5298
5299 devres_add(dev, host);
5300 dev_set_drvdata(dev, host);
5301
5302 spin_lock_init(&host->lock);
5303 host->dev = dev;
5304 host->n_ports = max_ports;
5305
5306 /* allocate ports bound to this host */
5307 for (i = 0; i < max_ports; i++) {
5308 struct ata_port *ap;
5309
5310 ap = ata_port_alloc(host);
5311 if (!ap)
5312 goto err_out;
5313
5314 ap->port_no = i;
5315 host->ports[i] = ap;
5316 }
5317
5318 devres_remove_group(dev, NULL);
5319 return host;
5320
5321 err_out:
5322 devres_release_group(dev, NULL);
5323 return NULL;
5324 }
5325
5326 /**
5327 * ata_host_alloc_pinfo - alloc host and init with port_info array
5328 * @dev: generic device this host is associated with
5329 * @ppi: array of ATA port_info to initialize host with
5330 * @n_ports: number of ATA ports attached to this host
5331 *
5332 * Allocate ATA host and initialize with info from @ppi. If NULL
5333 * terminated, @ppi may contain fewer entries than @n_ports. The
5334 * last entry will be used for the remaining ports.
5335 *
5336 * RETURNS:
5337	 *	Allocated ATA host on success, NULL on failure.
5338 *
5339 * LOCKING:
5340 * Inherited from calling layer (may sleep).
5341 */
5342 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5343 const struct ata_port_info * const * ppi,
5344 int n_ports)
5345 {
5346 const struct ata_port_info *pi;
5347 struct ata_host *host;
5348 int i, j;
5349
5350 host = ata_host_alloc(dev, n_ports);
5351 if (!host)
5352 return NULL;
5353
5354 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5355 struct ata_port *ap = host->ports[i];
5356
5357 if (ppi[j])
5358 pi = ppi[j++];
5359
5360 ap->pio_mask = pi->pio_mask;
5361 ap->mwdma_mask = pi->mwdma_mask;
5362 ap->udma_mask = pi->udma_mask;
5363 ap->flags |= pi->flags;
5364 ap->link.flags |= pi->link_flags;
5365 ap->ops = pi->port_ops;
5366
5367 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5368 host->ops = pi->port_ops;
5369 }
5370
5371 return host;
5372 }
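/*
 * Illustrative ppi usage (a sketch; my_port_info is hypothetical): because
 * the last entry is reused for the remaining ports, a two-port host that
 * uses the same port_info on both ports only needs one real entry:
 *
 *	static const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */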
5373
5374 static void ata_host_stop(struct device *gendev, void *res)
5375 {
5376 struct ata_host *host = dev_get_drvdata(gendev);
5377 int i;
5378
5379 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5380
5381 for (i = 0; i < host->n_ports; i++) {
5382 struct ata_port *ap = host->ports[i];
5383
5384 if (ap->ops->port_stop)
5385 ap->ops->port_stop(ap);
5386 }
5387
5388 if (host->ops->host_stop)
5389 host->ops->host_stop(host);
5390 }
5391
5392 /**
5393 * ata_finalize_port_ops - finalize ata_port_operations
5394 * @ops: ata_port_operations to finalize
5395 *
5396 * An ata_port_operations can inherit from another ops and that
5397 * ops can again inherit from another. This can go on as many
5398 * times as necessary as long as there is no loop in the
5399 * inheritance chain.
5400 *
5401 * Ops tables are finalized when the host is started. NULL or
5402	 *	unspecified entries are inherited from the closest ancestor
5403 * which has the method and the entry is populated with it.
5404 * After finalization, the ops table directly points to all the
5405 * methods and ->inherits is no longer necessary and cleared.
5406 *
5407 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5408 *
5409 * LOCKING:
5410 * None.
5411 */
5412 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5413 {
5414 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
5415 const struct ata_port_operations *cur;
5416 void **begin = (void **)ops;
5417 void **end = (void **)&ops->inherits;
5418 void **pp;
5419
5420 if (!ops || !ops->inherits)
5421 return;
5422
5423 spin_lock(&lock);
5424
5425 for (cur = ops->inherits; cur; cur = cur->inherits) {
5426 void **inherit = (void **)cur;
5427
5428 for (pp = begin; pp < end; pp++, inherit++)
5429 if (!*pp)
5430 *pp = *inherit;
5431 }
5432
5433 for (pp = begin; pp < end; pp++)
5434 if (IS_ERR(*pp))
5435 *pp = NULL;
5436
5437 ops->inherits = NULL;
5438
5439 spin_unlock(&lock);
5440 }
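/*
 * Illustrative inheritance chain (hypothetical LLD ops, not from this
 * file): unspecified methods are filled in from the ancestor, and a method
 * can be forced off with ATA_OP_NULL:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.sff_dev_select	= ATA_OP_NULL,
 *	};
 *
 * After finalization, my_port_ops carries every method of sata_port_ops
 * (and its ancestors) except the ones overridden above, ->sff_dev_select
 * is NULL, and ->inherits is cleared.
 */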
5441
5442 /**
5443 * ata_host_start - start and freeze ports of an ATA host
5444 * @host: ATA host to start ports for
5445 *
5446 * Start and then freeze ports of @host. Started status is
5447 * recorded in host->flags, so this function can be called
5448 * multiple times. Ports are guaranteed to get started only
5449	 *	once. If host->ops isn't initialized yet, it's set to the
5450 * first non-dummy port ops.
5451 *
5452 * LOCKING:
5453 * Inherited from calling layer (may sleep).
5454 *
5455 * RETURNS:
5456 * 0 if all ports are started successfully, -errno otherwise.
5457 */
5458 int ata_host_start(struct ata_host *host)
5459 {
5460 int have_stop = 0;
5461 void *start_dr = NULL;
5462 int i, rc;
5463
5464 if (host->flags & ATA_HOST_STARTED)
5465 return 0;
5466
5467 ata_finalize_port_ops(host->ops);
5468
5469 for (i = 0; i < host->n_ports; i++) {
5470 struct ata_port *ap = host->ports[i];
5471
5472 ata_finalize_port_ops(ap->ops);
5473
5474 if (!host->ops && !ata_port_is_dummy(ap))
5475 host->ops = ap->ops;
5476
5477 if (ap->ops->port_stop)
5478 have_stop = 1;
5479 }
5480
5481 if (host->ops->host_stop)
5482 have_stop = 1;
5483
5484 if (have_stop) {
5485 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5486 if (!start_dr)
5487 return -ENOMEM;
5488 }
5489
5490 for (i = 0; i < host->n_ports; i++) {
5491 struct ata_port *ap = host->ports[i];
5492
5493 if (ap->ops->port_start) {
5494 rc = ap->ops->port_start(ap);
5495 if (rc) {
5496 if (rc != -ENODEV)
5497 dev_printk(KERN_ERR, host->dev,
5498 "failed to start port %d "
5499 "(errno=%d)\n", i, rc);
5500 goto err_out;
5501 }
5502 }
5503 ata_eh_freeze_port(ap);
5504 }
5505
5506 if (start_dr)
5507 devres_add(host->dev, start_dr);
5508 host->flags |= ATA_HOST_STARTED;
5509 return 0;
5510
5511 err_out:
5512 while (--i >= 0) {
5513 struct ata_port *ap = host->ports[i];
5514
5515 if (ap->ops->port_stop)
5516 ap->ops->port_stop(ap);
5517 }
5518 devres_free(start_dr);
5519 return rc;
5520 }
5521
5522 /**
5523 * ata_sas_host_init - Initialize a host struct
5524 * @host: host to initialize
5525 * @dev: device host is attached to
5526 * @flags: host flags
5527 * @ops: port_ops
5528 *
5529 * LOCKING:
5530 * PCI/etc. bus probe sem.
5531 *
5532 */
5533 /* KILLME - the only user left is ipr */
5534 void ata_host_init(struct ata_host *host, struct device *dev,
5535 unsigned long flags, struct ata_port_operations *ops)
5536 {
5537 spin_lock_init(&host->lock);
5538 host->dev = dev;
5539 host->flags = flags;
5540 host->ops = ops;
5541 }
5542
5543 /**
5544 * ata_host_register - register initialized ATA host
5545 * @host: ATA host to register
5546 * @sht: template for SCSI host
5547 *
5548 * Register initialized ATA host. @host is allocated using
5549 * ata_host_alloc() and fully initialized by LLD. This function
5550 * starts ports, registers @host with ATA and SCSI layers and
5551	 *	probes the attached devices.
5552 *
5553 * LOCKING:
5554 * Inherited from calling layer (may sleep).
5555 *
5556 * RETURNS:
5557 * 0 on success, -errno otherwise.
5558 */
5559 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5560 {
5561 int i, rc;
5562
5563 /* host must have been started */
5564 if (!(host->flags & ATA_HOST_STARTED)) {
5565 dev_printk(KERN_ERR, host->dev,
5566 "BUG: trying to register unstarted host\n");
5567 WARN_ON(1);
5568 return -EINVAL;
5569 }
5570
5571 /* Blow away unused ports. This happens when LLD can't
5572 * determine the exact number of ports to allocate at
5573 * allocation time.
5574 */
5575 for (i = host->n_ports; host->ports[i]; i++)
5576 kfree(host->ports[i]);
5577
5578 /* give ports names and add SCSI hosts */
5579 for (i = 0; i < host->n_ports; i++)
5580 host->ports[i]->print_id = ata_print_id++;
5581
5582 rc = ata_scsi_add_hosts(host, sht);
5583 if (rc)
5584 return rc;
5585
5586 /* associate with ACPI nodes */
5587 ata_acpi_associate(host);
5588
5589 /* set cable, sata_spd_limit and report */
5590 for (i = 0; i < host->n_ports; i++) {
5591 struct ata_port *ap = host->ports[i];
5592 unsigned long xfer_mask;
5593
5594 /* set SATA cable type if still unset */
5595 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5596 ap->cbl = ATA_CBL_SATA;
5597
5598 /* init sata_spd_limit to the current value */
5599 sata_link_init_spd(&ap->link);
5600
5601 /* print per-port info to dmesg */
5602 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5603 ap->udma_mask);
5604
5605 if (!ata_port_is_dummy(ap)) {
5606 ata_port_printk(ap, KERN_INFO,
5607 "%cATA max %s %s\n",
5608 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5609 ata_mode_string(xfer_mask),
5610 ap->link.eh_info.desc);
5611 ata_ehi_clear_desc(&ap->link.eh_info);
5612 } else
5613 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5614 }
5615
5616 /* perform each probe synchronously */
5617 DPRINTK("probe begin\n");
5618 for (i = 0; i < host->n_ports; i++) {
5619 struct ata_port *ap = host->ports[i];
5620
5621 /* probe */
5622 if (ap->ops->error_handler) {
5623 struct ata_eh_info *ehi = &ap->link.eh_info;
5624 unsigned long flags;
5625
5626 ata_port_probe(ap);
5627
5628 /* kick EH for boot probing */
5629 spin_lock_irqsave(ap->lock, flags);
5630
5631 ehi->probe_mask |= ATA_ALL_DEVICES;
5632 ehi->action |= ATA_EH_RESET;
5633 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5634
5635 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5636 ap->pflags |= ATA_PFLAG_LOADING;
5637 ata_port_schedule_eh(ap);
5638
5639 spin_unlock_irqrestore(ap->lock, flags);
5640
5641 /* wait for EH to finish */
5642 ata_port_wait_eh(ap);
5643 } else {
5644 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5645 rc = ata_bus_probe(ap);
5646 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5647
5648 if (rc) {
5649 /* FIXME: do something useful here?
5650 * Current libata behavior will
5651 * tear down everything when
5652 * the module is removed
5653 * or the h/w is unplugged.
5654 */
5655 }
5656 }
5657 }
5658
5659 /* probes are done, now scan each port's disk(s) */
5660 DPRINTK("host probe begin\n");
5661 for (i = 0; i < host->n_ports; i++) {
5662 struct ata_port *ap = host->ports[i];
5663
5664 ata_scsi_scan_host(ap, 1);
5665 ata_lpm_schedule(ap, ap->pm_policy);
5666 }
5667
5668 return 0;
5669 }
5670
5671 /**
5672 * ata_host_activate - start host, request IRQ and register it
5673 * @host: target ATA host
5674 * @irq: IRQ to request
5675 * @irq_handler: irq_handler used when requesting IRQ
5676 * @irq_flags: irq_flags used when requesting IRQ
5677 * @sht: scsi_host_template to use when registering the host
5678 *
5679 * After allocating an ATA host and initializing it, most libata
5680 * LLDs perform three steps to activate the host - start host,
5681 * request IRQ and register it. This helper takes necessasry
5682 * arguments and performs the three steps in one go.
5683 *
5684 * An invalid IRQ skips the IRQ registration and expects the host to
5685 * have set polling mode on the port. In this case, @irq_handler
5686 * should be NULL.
5687 *
5688 * LOCKING:
5689 * Inherited from calling layer (may sleep).
5690 *
5691 * RETURNS:
5692 * 0 on success, -errno otherwise.
5693 */
5694 int ata_host_activate(struct ata_host *host, int irq,
5695 irq_handler_t irq_handler, unsigned long irq_flags,
5696 struct scsi_host_template *sht)
5697 {
5698 int i, rc;
5699
5700 rc = ata_host_start(host);
5701 if (rc)
5702 return rc;
5703
5704 /* Special case for polling mode */
5705 if (!irq) {
5706 WARN_ON(irq_handler);
5707 return ata_host_register(host, sht);
5708 }
5709
5710 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5711 dev_driver_string(host->dev), host);
5712 if (rc)
5713 return rc;
5714
5715 for (i = 0; i < host->n_ports; i++)
5716 ata_port_desc(host->ports[i], "irq %d", irq);
5717
5718 rc = ata_host_register(host, sht);
5719 /* if failed, just free the IRQ and leave ports alone */
5720 if (rc)
5721 devm_free_irq(host->dev, irq, host);
5722
5723 return rc;
5724 }
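/*
 * Typical LLD probe sequence, as a minimal sketch (the my_* names are
 * hypothetical): allocate the host, fill in per-port resources, then let
 * ata_host_activate() start it, grab the IRQ and register it in one go:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *
 *	// ... set up ioaddr, private data, etc. for each host->ports[i] ...
 *
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */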
5725
5726 /**
5727	 *	ata_port_detach - Detach ATA port in preparation for device removal
5728 * @ap: ATA port to be detached
5729 *
5730 * Detach all ATA devices and the associated SCSI devices of @ap;
5731 * then, remove the associated SCSI host. @ap is guaranteed to
5732 * be quiescent on return from this function.
5733 *
5734 * LOCKING:
5735 * Kernel thread context (may sleep).
5736 */
5737 static void ata_port_detach(struct ata_port *ap)
5738 {
5739 unsigned long flags;
5740 struct ata_link *link;
5741 struct ata_device *dev;
5742
5743 if (!ap->ops->error_handler)
5744 goto skip_eh;
5745
5746 /* tell EH we're leaving & flush EH */
5747 spin_lock_irqsave(ap->lock, flags);
5748 ap->pflags |= ATA_PFLAG_UNLOADING;
5749 spin_unlock_irqrestore(ap->lock, flags);
5750
5751 ata_port_wait_eh(ap);
5752
5753 /* EH is now guaranteed to see UNLOADING - EH context belongs
5754 * to us. Disable all existing devices.
5755 */
5756 ata_port_for_each_link(link, ap) {
5757 ata_link_for_each_dev(dev, link)
5758 ata_dev_disable(dev);
5759 }
5760
5761 /* Final freeze & EH. All in-flight commands are aborted. EH
5762	 * will be skipped and retries will be terminated with bad
5763 * target.
5764 */
5765 spin_lock_irqsave(ap->lock, flags);
5766 ata_port_freeze(ap); /* won't be thawed */
5767 spin_unlock_irqrestore(ap->lock, flags);
5768
5769 ata_port_wait_eh(ap);
5770 cancel_rearming_delayed_work(&ap->hotplug_task);
5771
5772 skip_eh:
5773 /* remove the associated SCSI host */
5774 scsi_remove_host(ap->scsi_host);
5775 }
5776
5777 /**
5778 * ata_host_detach - Detach all ports of an ATA host
5779 * @host: Host to detach
5780 *
5781 * Detach all ports of @host.
5782 *
5783 * LOCKING:
5784 * Kernel thread context (may sleep).
5785 */
5786 void ata_host_detach(struct ata_host *host)
5787 {
5788 int i;
5789
5790 for (i = 0; i < host->n_ports; i++)
5791 ata_port_detach(host->ports[i]);
5792
5793 /* the host is dead now, dissociate ACPI */
5794 ata_acpi_dissociate(host);
5795 }
5796
5797 #ifdef CONFIG_PCI
5798
5799 /**
5800 * ata_pci_remove_one - PCI layer callback for device removal
5801 * @pdev: PCI device that was removed
5802 *
5803 * PCI layer indicates to libata via this hook that hot-unplug or
5804 * module unload event has occurred. Detach all ports. Resource
5805 * release is handled via devres.
5806 *
5807 * LOCKING:
5808 * Inherited from PCI layer (may sleep).
5809 */
5810 void ata_pci_remove_one(struct pci_dev *pdev)
5811 {
5812 struct device *dev = &pdev->dev;
5813 struct ata_host *host = dev_get_drvdata(dev);
5814
5815 ata_host_detach(host);
5816 }
5817
5818 /* move to PCI subsystem */
5819 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5820 {
5821 unsigned long tmp = 0;
5822
5823 switch (bits->width) {
5824 case 1: {
5825 u8 tmp8 = 0;
5826 pci_read_config_byte(pdev, bits->reg, &tmp8);
5827 tmp = tmp8;
5828 break;
5829 }
5830 case 2: {
5831 u16 tmp16 = 0;
5832 pci_read_config_word(pdev, bits->reg, &tmp16);
5833 tmp = tmp16;
5834 break;
5835 }
5836 case 4: {
5837 u32 tmp32 = 0;
5838 pci_read_config_dword(pdev, bits->reg, &tmp32);
5839 tmp = tmp32;
5840 break;
5841 }
5842
5843 default:
5844 return -EINVAL;
5845 }
5846
5847 tmp &= bits->mask;
5848
5849 return (tmp == bits->val) ? 1 : 0;
5850 }
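/*
 * Usage sketch (illustrative; the register offset and mask are made up):
 * many PATA drivers use this to check an "enable" bit in PCI config space
 * before touching a channel:
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;		// channel disabled in BIOS
 */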
5851
5852 #ifdef CONFIG_PM
5853 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5854 {
5855 pci_save_state(pdev);
5856 pci_disable_device(pdev);
5857
5858 if (mesg.event & PM_EVENT_SLEEP)
5859 pci_set_power_state(pdev, PCI_D3hot);
5860 }
5861
5862 int ata_pci_device_do_resume(struct pci_dev *pdev)
5863 {
5864 int rc;
5865
5866 pci_set_power_state(pdev, PCI_D0);
5867 pci_restore_state(pdev);
5868
5869 rc = pcim_enable_device(pdev);
5870 if (rc) {
5871 dev_printk(KERN_ERR, &pdev->dev,
5872 "failed to enable device after resume (%d)\n", rc);
5873 return rc;
5874 }
5875
5876 pci_set_master(pdev);
5877 return 0;
5878 }
5879
5880 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5881 {
5882 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5883 int rc = 0;
5884
5885 rc = ata_host_suspend(host, mesg);
5886 if (rc)
5887 return rc;
5888
5889 ata_pci_device_do_suspend(pdev, mesg);
5890
5891 return 0;
5892 }
5893
5894 int ata_pci_device_resume(struct pci_dev *pdev)
5895 {
5896 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5897 int rc;
5898
5899 rc = ata_pci_device_do_resume(pdev);
5900 if (rc == 0)
5901 ata_host_resume(host);
5902 return rc;
5903 }
5904 #endif /* CONFIG_PM */
5905
5906 #endif /* CONFIG_PCI */
5907
5908 static int __init ata_parse_force_one(char **cur,
5909 struct ata_force_ent *force_ent,
5910 const char **reason)
5911 {
5912 /* FIXME: Currently, there's no way to tag init const data and
5913 * using __initdata causes build failure on some versions of
5914 * gcc. Once __initdataconst is implemented, add const to the
5915 * following structure.
5916 */
5917 static struct ata_force_param force_tbl[] __initdata = {
5918 { "40c", .cbl = ATA_CBL_PATA40 },
5919 { "80c", .cbl = ATA_CBL_PATA80 },
5920 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5921 { "unk", .cbl = ATA_CBL_PATA_UNK },
5922 { "ign", .cbl = ATA_CBL_PATA_IGN },
5923 { "sata", .cbl = ATA_CBL_SATA },
5924 { "1.5Gbps", .spd_limit = 1 },
5925 { "3.0Gbps", .spd_limit = 2 },
5926 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5927 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5928 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5929 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5930 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5931 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5932 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5933 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5934 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5935 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5936 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5937 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5938 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5939 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5940 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5941 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5942 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5943 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5944 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5945 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5946 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5947 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5948 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5949 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5950 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5951 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5952 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5953 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5954 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5955 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5956 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5957 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5958 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5959 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5960 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5961 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5962 };
5963 char *start = *cur, *p = *cur;
5964 char *id, *val, *endp;
5965 const struct ata_force_param *match_fp = NULL;
5966 int nr_matches = 0, i;
5967
5968 /* find where this param ends and update *cur */
5969 while (*p != '\0' && *p != ',')
5970 p++;
5971
5972 if (*p == '\0')
5973 *cur = p;
5974 else
5975 *cur = p + 1;
5976
5977 *p = '\0';
5978
5979 /* parse */
5980 p = strchr(start, ':');
5981 if (!p) {
5982 val = strstrip(start);
5983 goto parse_val;
5984 }
5985 *p = '\0';
5986
5987 id = strstrip(start);
5988 val = strstrip(p + 1);
5989
5990 /* parse id */
5991 p = strchr(id, '.');
5992 if (p) {
5993 *p++ = '\0';
5994 force_ent->device = simple_strtoul(p, &endp, 10);
5995 if (p == endp || *endp != '\0') {
5996 *reason = "invalid device";
5997 return -EINVAL;
5998 }
5999 }
6000
6001 force_ent->port = simple_strtoul(id, &endp, 10);
6002 if (id == endp || *endp != '\0') {
6003 *reason = "invalid port/link";
6004 return -EINVAL;
6005 }
6006
6007 parse_val:
6008 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6009 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6010 const struct ata_force_param *fp = &force_tbl[i];
6011
6012 if (strncasecmp(val, fp->name, strlen(val)))
6013 continue;
6014
6015 nr_matches++;
6016 match_fp = fp;
6017
6018 if (strcasecmp(val, fp->name) == 0) {
6019 nr_matches = 1;
6020 break;
6021 }
6022 }
6023
6024 if (!nr_matches) {
6025 *reason = "unknown value";
6026 return -EINVAL;
6027 }
6028 if (nr_matches > 1) {
6029 *reason = "ambiguous value";
6030 return -EINVAL;
6031 }
6032
6033 force_ent->param = *match_fp;
6034
6035 return 0;
6036 }
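/*
 * Illustrative examples of the libata.force syntax parsed above (the
 * concrete values are hypothetical): the parameter is a comma
 * separated list of [PORT[.DEVICE]:]VALUE entries, where VALUE is
 * one of the names in force_tbl; an entry without an ID reuses the
 * port/device of the previous entry.
 *
 *	libata.force=1.5Gbps		limit all links to 1.5Gbps
 *	libata.force=2:noncq		turn off NCQ on port 2
 *	libata.force=1:udma4,2:40c	UDMA/66 on port 1, 40-wire
 *					cable on port 2
 */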
6037
6038 static void __init ata_parse_force_param(void)
6039 {
6040 int idx = 0, size = 1;
6041 int last_port = -1, last_device = -1;
6042 char *p, *cur, *next;
6043
6044 /* calculate maximum number of params and allocate force_tbl */
6045 for (p = ata_force_param_buf; *p; p++)
6046 if (*p == ',')
6047 size++;
6048
6049 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6050 if (!ata_force_tbl) {
6051 printk(KERN_WARNING "ata: failed to allocate force table, "
6052 "libata.force ignored\n");
6053 return;
6054 }
6055
6056 /* parse and populate the table */
6057 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6058 const char *reason = "";
6059 struct ata_force_ent te = { .port = -1, .device = -1 };
6060
6061 next = cur;
6062 if (ata_parse_force_one(&next, &te, &reason)) {
6063 printk(KERN_WARNING "ata: failed to parse force "
6064 "parameter \"%s\" (%s)\n",
6065 cur, reason);
6066 continue;
6067 }
6068
6069 if (te.port == -1) {
6070 te.port = last_port;
6071 te.device = last_device;
6072 }
6073
6074 ata_force_tbl[idx++] = te;
6075
6076 last_port = te.port;
6077 last_device = te.device;
6078 }
6079
6080 ata_force_tbl_size = idx;
6081 }
6082
6083 static int __init ata_init(void)
6084 {
6085 ata_probe_timeout *= HZ;
6086
6087 ata_parse_force_param();
6088
6089 ata_wq = create_workqueue("ata");
6090 if (!ata_wq)
6091 return -ENOMEM;
6092
6093 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6094 if (!ata_aux_wq) {
6095 destroy_workqueue(ata_wq);
6096 return -ENOMEM;
6097 }
6098
6099 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6100 return 0;
6101 }
6102
6103 static void __exit ata_exit(void)
6104 {
6105 kfree(ata_force_tbl);
6106 destroy_workqueue(ata_wq);
6107 destroy_workqueue(ata_aux_wq);
6108 }
6109
6110 subsys_initcall(ata_init);
6111 module_exit(ata_exit);
6112
6113 static unsigned long ratelimit_time;
6114 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6115
6116 int ata_ratelimit(void)
6117 {
6118 int rc;
6119 unsigned long flags;
6120
6121 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6122
6123 if (time_after(jiffies, ratelimit_time)) {
6124 rc = 1;
6125 ratelimit_time = jiffies + (HZ/5);
6126 } else
6127 rc = 0;
6128
6129 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6130
6131 return rc;
6132 }
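/*
 * Illustrative sketch: ata_ratelimit() gates messages that may be
 * generated at interrupt rate, allowing roughly five messages per
 * second.  The message text below is hypothetical.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */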
6133
6134 /**
6135 * ata_wait_register - wait until register value changes
6136 * @reg: IO-mapped register
6137 * @mask: Mask to apply to read register value
6138 * @val: Wait condition
6139 * @interval_msec: polling interval in milliseconds
6140 * @timeout_msec: timeout in milliseconds
6141 *
6142 * Waiting for some bits of a register to change is a common
6143 * operation for ATA controllers. This function reads the 32-bit
6144 * LE IO-mapped register @reg and tests for the following condition.
6145 *
6146 * (*@reg & mask) != val
6147 *
6148 * If the condition is met, it returns; otherwise, the process is
6149 * repeated after @interval_msec until timeout.
6150 *
6151 * LOCKING:
6152 * Kernel thread context (may sleep)
6153 *
6154 * RETURNS:
6155 * The final register value.
6156 */
6157 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6158 unsigned long interval_msec,
6159 unsigned long timeout_msec)
6160 {
6161 unsigned long timeout;
6162 u32 tmp;
6163
6164 tmp = ioread32(reg);
6165
6166 /* Calculate timeout _after_ the first read to make sure
6167 * preceding writes reach the controller before starting to
6168 * eat away the timeout.
6169 */
6170 timeout = jiffies + (timeout_msec * HZ) / 1000;
6171
6172 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6173 msleep(interval_msec);
6174 tmp = ioread32(reg);
6175 }
6176
6177 return tmp;
6178 }
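/*
 * Illustrative sketch with hypothetical register and bit names: a
 * typical caller polls a controller bit until it clears and then
 * inspects the returned value to detect a timeout.
 *
 *	tmp = ata_wait_register(port_mmio + FOO_CMD, FOO_CMD_BUSY,
 *				FOO_CMD_BUSY, 1, 500);
 *	if (tmp & FOO_CMD_BUSY)
 *		ata_port_printk(ap, KERN_WARNING,
 *				"failed to stop engine\n");
 */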
6179
6180 /*
6181 * Dummy port_ops
6182 */
6183 static void ata_dummy_noret(struct ata_port *ap) { }
6184 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6185 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6186
6187 static u8 ata_dummy_check_status(struct ata_port *ap)
6188 {
6189 return ATA_DRDY;
6190 }
6191
6192 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6193 {
6194 return AC_ERR_SYSTEM;
6195 }
6196
6197 struct ata_port_operations ata_dummy_port_ops = {
6198 .sff_check_status = ata_dummy_check_status,
6199 .sff_check_altstatus = ata_dummy_check_status,
6200 .sff_dev_select = ata_noop_dev_select,
6201 .qc_prep = ata_noop_qc_prep,
6202 .qc_issue = ata_dummy_qc_issue,
6203 .freeze = ata_dummy_noret,
6204 .thaw = ata_dummy_noret,
6205 .error_handler = ata_dummy_noret,
6206 .post_internal_cmd = ata_dummy_qc_noret,
6207 .sff_irq_clear = ata_dummy_noret,
6208 .port_start = ata_dummy_ret0,
6209 .port_stop = ata_dummy_noret,
6210 };
6211
6212 const struct ata_port_info ata_dummy_port_info = {
6213 .port_ops = &ata_dummy_port_ops,
6214 };
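/*
 * Illustrative sketch: ata_dummy_port_info is handed to
 * ata_host_alloc_pinfo() for ports that exist in the controller's
 * numbering but must not be driven.  The "foo" port info below is
 * hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */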
6215
6216 /*
6217 * libata is essentially a library of internal helper functions for
6218 * low-level ATA host controller drivers. As such, the API/ABI is
6219 * likely to change as new drivers are added and updated.
6220 * Do not depend on ABI/API stability.
6221 */
6222 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6223 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6224 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6225 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6226 EXPORT_SYMBOL_GPL(sata_port_ops);
6227 EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
6228 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6229 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6230 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6231 EXPORT_SYMBOL_GPL(ata_host_init);
6232 EXPORT_SYMBOL_GPL(ata_host_alloc);
6233 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6234 EXPORT_SYMBOL_GPL(ata_host_start);
6235 EXPORT_SYMBOL_GPL(ata_host_register);
6236 EXPORT_SYMBOL_GPL(ata_host_activate);
6237 EXPORT_SYMBOL_GPL(ata_host_detach);
6238 EXPORT_SYMBOL_GPL(ata_sg_init);
6239 EXPORT_SYMBOL_GPL(ata_qc_complete);
6240 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6241 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6242 EXPORT_SYMBOL_GPL(sata_print_link_status);
6243 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6244 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6245 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6246 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6247 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6248 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6249 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6250 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6251 EXPORT_SYMBOL_GPL(ata_mode_string);
6252 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6253 EXPORT_SYMBOL_GPL(ata_port_start);
6254 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6255 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6256 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6257 EXPORT_SYMBOL_GPL(ata_port_probe);
6258 EXPORT_SYMBOL_GPL(ata_dev_disable);
6259 EXPORT_SYMBOL_GPL(sata_set_spd);
6260 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6261 EXPORT_SYMBOL_GPL(sata_link_debounce);
6262 EXPORT_SYMBOL_GPL(sata_link_resume);
6263 EXPORT_SYMBOL_GPL(ata_std_prereset);
6264 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6265 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6266 EXPORT_SYMBOL_GPL(ata_std_postreset);
6267 EXPORT_SYMBOL_GPL(ata_dev_classify);
6268 EXPORT_SYMBOL_GPL(ata_dev_pair);
6269 EXPORT_SYMBOL_GPL(ata_port_disable);
6270 EXPORT_SYMBOL_GPL(ata_ratelimit);
6271 EXPORT_SYMBOL_GPL(ata_wait_register);
6272 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6273 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6274 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6275 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6276 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6277 EXPORT_SYMBOL_GPL(sata_scr_valid);
6278 EXPORT_SYMBOL_GPL(sata_scr_read);
6279 EXPORT_SYMBOL_GPL(sata_scr_write);
6280 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6281 EXPORT_SYMBOL_GPL(ata_link_online);
6282 EXPORT_SYMBOL_GPL(ata_link_offline);
6283 #ifdef CONFIG_PM
6284 EXPORT_SYMBOL_GPL(ata_host_suspend);
6285 EXPORT_SYMBOL_GPL(ata_host_resume);
6286 #endif /* CONFIG_PM */
6287 EXPORT_SYMBOL_GPL(ata_id_string);
6288 EXPORT_SYMBOL_GPL(ata_id_c_string);
6289 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6290
6291 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6292 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6293 EXPORT_SYMBOL_GPL(ata_timing_compute);
6294 EXPORT_SYMBOL_GPL(ata_timing_merge);
6295 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6296
6297 #ifdef CONFIG_PCI
6298 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6299 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6300 #ifdef CONFIG_PM
6301 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6302 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6303 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6304 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6305 #endif /* CONFIG_PM */
6306 #endif /* CONFIG_PCI */
6307
6308 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
6309 EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
6310
6311 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6312 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6313 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6314 EXPORT_SYMBOL_GPL(ata_port_desc);
6315 #ifdef CONFIG_PCI
6316 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6317 #endif /* CONFIG_PCI */
6318 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6319 EXPORT_SYMBOL_GPL(ata_link_abort);
6320 EXPORT_SYMBOL_GPL(ata_port_abort);
6321 EXPORT_SYMBOL_GPL(ata_port_freeze);
6322 EXPORT_SYMBOL_GPL(sata_async_notification);
6323 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6324 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6325 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6326 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6327 EXPORT_SYMBOL_GPL(ata_do_eh);
6328 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6329
6330 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6331 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6332 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6333 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6334 EXPORT_SYMBOL_GPL(ata_cable_sata);