libata: rename SFF functions
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/jiffies.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <scsi/scsi.h>
61 #include <scsi/scsi_cmnd.h>
62 #include <scsi/scsi_host.h>
63 #include <linux/libata.h>
64 #include <asm/semaphore.h>
65 #include <asm/byteorder.h>
66 #include <linux/cdrom.h>
67
68 #include "libata.h"
69
70
71 /* debounce timing parameters in msecs { interval, duration, timeout } */
72 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
73 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
74 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
75
76 const struct ata_port_operations ata_base_port_ops = {
77 .irq_clear = ata_noop_irq_clear,
78 .prereset = ata_sff_prereset,
79 .hardreset = sata_sff_hardreset,
80 .postreset = ata_sff_postreset,
81 .error_handler = ata_std_error_handler,
82 };
83
84 const struct ata_port_operations sata_port_ops = {
85 .inherits = &ata_base_port_ops,
86
87 .qc_defer = ata_std_qc_defer,
88 .dev_select = ata_noop_dev_select,
89 };
90
91 const struct ata_port_operations sata_pmp_port_ops = {
92 .inherits = &sata_port_ops,
93
94 .pmp_prereset = sata_pmp_std_prereset,
95 .pmp_hardreset = sata_pmp_std_hardreset,
96 .pmp_postreset = sata_pmp_std_postreset,
97 .error_handler = sata_pmp_error_handler,
98 };
99
100 static unsigned int ata_dev_init_params(struct ata_device *dev,
101 u16 heads, u16 sectors);
102 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
103 static unsigned int ata_dev_set_feature(struct ata_device *dev,
104 u8 enable, u8 feature);
105 static void ata_dev_xfermask(struct ata_device *dev);
106 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
107
108 unsigned int ata_print_id = 1;
109 static struct workqueue_struct *ata_wq;
110
111 struct workqueue_struct *ata_aux_wq;
112
113 struct ata_force_param {
114 const char *name;
115 unsigned int cbl;
116 int spd_limit;
117 unsigned long xfer_mask;
118 unsigned int horkage_on;
119 unsigned int horkage_off;
120 };
121
122 struct ata_force_ent {
123 int port;
124 int device;
125 struct ata_force_param param;
126 };
127
128 static struct ata_force_ent *ata_force_tbl;
129 static int ata_force_tbl_size;
130
131 static char ata_force_param_buf[PAGE_SIZE] __initdata;
132 /* param_buf is thrown away after initialization, disallow read */
133 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
134 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
135
136 int atapi_enabled = 1;
137 module_param(atapi_enabled, int, 0444);
138 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
139
140 static int atapi_dmadir = 0;
141 module_param(atapi_dmadir, int, 0444);
142 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
143
144 int atapi_passthru16 = 1;
145 module_param(atapi_passthru16, int, 0444);
146 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
147
148 int libata_fua = 0;
149 module_param_named(fua, libata_fua, int, 0444);
150 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
151
152 static int ata_ignore_hpa;
153 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
154 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
155
156 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
157 module_param_named(dma, libata_dma_mask, int, 0444);
158 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
159
160 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
161 module_param(ata_probe_timeout, int, 0444);
162 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
163
164 int libata_noacpi = 0;
165 module_param_named(noacpi, libata_noacpi, int, 0444);
166 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
167
168 int libata_allow_tpm = 0;
169 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
170 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
171
172 MODULE_AUTHOR("Jeff Garzik");
173 MODULE_DESCRIPTION("Library module for ATA devices");
174 MODULE_LICENSE("GPL");
175 MODULE_VERSION(DRV_VERSION);
176
177
178 /**
179 * ata_force_cbl - force cable type according to libata.force
180 * @ap: ATA port of interest
181 *
182 * Force cable type according to libata.force and whine about it.
183  * The last entry with a matching port number is used, so it
184 * can be specified as part of device force parameters. For
185 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
186 * same effect.
187 *
188 * LOCKING:
189 * EH context.
190 */
191 void ata_force_cbl(struct ata_port *ap)
192 {
193 int i;
194
195 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
196 const struct ata_force_ent *fe = &ata_force_tbl[i];
197
198 if (fe->port != -1 && fe->port != ap->print_id)
199 continue;
200
201 if (fe->param.cbl == ATA_CBL_NONE)
202 continue;
203
204 ap->cbl = fe->param.cbl;
205 ata_port_printk(ap, KERN_NOTICE,
206 "FORCE: cable set to %s\n", fe->param.name);
207 return;
208 }
209 }
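/*
 * Usage sketch, reusing the example from the comment above: passing
 * "libata.force=a:40c,1.00:udma4" on the kernel command line has the
 * same effect as "libata.force=1.00:40c,udma4"; the last entry whose
 * port/device spec matches wins.
 */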
210
211 /**
212 * ata_force_spd_limit - force SATA spd limit according to libata.force
213 * @link: ATA link of interest
214 *
215 * Force SATA spd limit according to libata.force and whine about
216 * it. When only the port part is specified (e.g. 1:), the limit
217 * applies to all links connected to both the host link and all
218 * fan-out ports connected via PMP. If the device part is
219 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
220  * link, not the host link.  Device number 15 always points to the
221 * host link whether PMP is attached or not.
222 *
223 * LOCKING:
224 * EH context.
225 */
226 static void ata_force_spd_limit(struct ata_link *link)
227 {
228 int linkno, i;
229
230 if (ata_is_host_link(link))
231 linkno = 15;
232 else
233 linkno = link->pmp;
234
235 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
236 const struct ata_force_ent *fe = &ata_force_tbl[i];
237
238 if (fe->port != -1 && fe->port != link->ap->print_id)
239 continue;
240
241 if (fe->device != -1 && fe->device != linkno)
242 continue;
243
244 if (!fe->param.spd_limit)
245 continue;
246
247 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
248 ata_link_printk(link, KERN_NOTICE,
249 "FORCE: PHY spd limit set to %s\n", fe->param.name);
250 return;
251 }
252 }
253
254 /**
255 * ata_force_xfermask - force xfermask according to libata.force
256 * @dev: ATA device of interest
257 *
258 * Force xfer_mask according to libata.force and whine about it.
259 * For consistency with link selection, device number 15 selects
260 * the first device connected to the host link.
261 *
262 * LOCKING:
263 * EH context.
264 */
265 static void ata_force_xfermask(struct ata_device *dev)
266 {
267 int devno = dev->link->pmp + dev->devno;
268 int alt_devno = devno;
269 int i;
270
271 /* allow n.15 for the first device attached to host port */
272 if (ata_is_host_link(dev->link) && devno == 0)
273 alt_devno = 15;
274
275 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
276 const struct ata_force_ent *fe = &ata_force_tbl[i];
277 unsigned long pio_mask, mwdma_mask, udma_mask;
278
279 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
280 continue;
281
282 if (fe->device != -1 && fe->device != devno &&
283 fe->device != alt_devno)
284 continue;
285
286 if (!fe->param.xfer_mask)
287 continue;
288
289 ata_unpack_xfermask(fe->param.xfer_mask,
290 &pio_mask, &mwdma_mask, &udma_mask);
291 if (udma_mask)
292 dev->udma_mask = udma_mask;
293 else if (mwdma_mask) {
294 dev->udma_mask = 0;
295 dev->mwdma_mask = mwdma_mask;
296 } else {
297 dev->udma_mask = 0;
298 dev->mwdma_mask = 0;
299 dev->pio_mask = pio_mask;
300 }
301
302 ata_dev_printk(dev, KERN_NOTICE,
303 "FORCE: xfer_mask set to %s\n", fe->param.name);
304 return;
305 }
306 }
307
308 /**
309 * ata_force_horkage - force horkage according to libata.force
310 * @dev: ATA device of interest
311 *
312 * Force horkage according to libata.force and whine about it.
313 * For consistency with link selection, device number 15 selects
314 * the first device connected to the host link.
315 *
316 * LOCKING:
317 * EH context.
318 */
319 static void ata_force_horkage(struct ata_device *dev)
320 {
321 int devno = dev->link->pmp + dev->devno;
322 int alt_devno = devno;
323 int i;
324
325 /* allow n.15 for the first device attached to host port */
326 if (ata_is_host_link(dev->link) && devno == 0)
327 alt_devno = 15;
328
329 for (i = 0; i < ata_force_tbl_size; i++) {
330 const struct ata_force_ent *fe = &ata_force_tbl[i];
331
332 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
333 continue;
334
335 if (fe->device != -1 && fe->device != devno &&
336 fe->device != alt_devno)
337 continue;
338
339 if (!(~dev->horkage & fe->param.horkage_on) &&
340 !(dev->horkage & fe->param.horkage_off))
341 continue;
342
343 dev->horkage |= fe->param.horkage_on;
344 dev->horkage &= ~fe->param.horkage_off;
345
346 ata_dev_printk(dev, KERN_NOTICE,
347 "FORCE: horkage modified (%s)\n", fe->param.name);
348 }
349 }
350
351 /**
352 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
353 * @opcode: SCSI opcode
354 *
355 * Determine ATAPI command type from @opcode.
356 *
357 * LOCKING:
358 * None.
359 *
360 * RETURNS:
361 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
362 */
363 int atapi_cmd_type(u8 opcode)
364 {
365 switch (opcode) {
366 case GPCMD_READ_10:
367 case GPCMD_READ_12:
368 return ATAPI_READ;
369
370 case GPCMD_WRITE_10:
371 case GPCMD_WRITE_12:
372 case GPCMD_WRITE_AND_VERIFY_10:
373 return ATAPI_WRITE;
374
375 case GPCMD_READ_CD:
376 case GPCMD_READ_CD_MSF:
377 return ATAPI_READ_CD;
378
379 case ATA_16:
380 case ATA_12:
381 if (atapi_passthru16)
382 return ATAPI_PASS_THRU;
383 /* fall thru */
384 default:
385 return ATAPI_MISC;
386 }
387 }
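/*
 * For example, GPCMD_READ_10 (SCSI READ(10), opcode 0x28) maps to
 * ATAPI_READ above, while ATA_12/ATA_16 map to ATAPI_PASS_THRU only
 * while the atapi_passthru16 module parameter is left at its default
 * of 1; otherwise they fall through to ATAPI_MISC.
 */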
388
389 /**
390 * ata_noop_irq_clear - Noop placeholder for irq_clear
391 * @ap: Port associated with this ATA transaction.
392 */
393 void ata_noop_irq_clear(struct ata_port *ap)
394 {
395 }
396
397 /**
398 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
399 * @tf: Taskfile to convert
400 * @pmp: Port multiplier port
401  *	@is_cmd: This FIS is for a command
402  *	@fis: Buffer into which data will be output
403 *
404 * Converts a standard ATA taskfile to a Serial ATA
405 * FIS structure (Register - Host to Device).
406 *
407 * LOCKING:
408 * Inherited from caller.
409 */
410 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
411 {
412 fis[0] = 0x27; /* Register - Host to Device FIS */
413 	fis[1] = pmp & 0xf;	/* Port multiplier number */
414 if (is_cmd)
415 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
416
417 fis[2] = tf->command;
418 fis[3] = tf->feature;
419
420 fis[4] = tf->lbal;
421 fis[5] = tf->lbam;
422 fis[6] = tf->lbah;
423 fis[7] = tf->device;
424
425 fis[8] = tf->hob_lbal;
426 fis[9] = tf->hob_lbam;
427 fis[10] = tf->hob_lbah;
428 fis[11] = tf->hob_feature;
429
430 fis[12] = tf->nsect;
431 fis[13] = tf->hob_nsect;
432 fis[14] = 0;
433 fis[15] = tf->ctl;
434
435 fis[16] = 0;
436 fis[17] = 0;
437 fis[18] = 0;
438 fis[19] = 0;
439 }
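/*
 * Minimal usage sketch (hypothetical taskfile): build a command FIS
 * for a device behind PMP port 2.
 *
 *	u8 fis[20];
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	ata_tf_to_fis(&tf, 2, 1, fis);
 *
 * Afterwards fis[0] is 0x27, and fis[1] is 0x82 (bit 7 for a command
 * FIS plus PMP port 2).
 */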
440
441 /**
442 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
443 * @fis: Buffer from which data will be input
444 * @tf: Taskfile to output
445 *
446 * Converts a serial ATA FIS structure to a standard ATA taskfile.
447 *
448 * LOCKING:
449 * Inherited from caller.
450 */
451
452 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
453 {
454 tf->command = fis[2]; /* status */
455 tf->feature = fis[3]; /* error */
456
457 tf->lbal = fis[4];
458 tf->lbam = fis[5];
459 tf->lbah = fis[6];
460 tf->device = fis[7];
461
462 tf->hob_lbal = fis[8];
463 tf->hob_lbam = fis[9];
464 tf->hob_lbah = fis[10];
465
466 tf->nsect = fis[12];
467 tf->hob_nsect = fis[13];
468 }
469
470 static const u8 ata_rw_cmds[] = {
471 /* pio multi */
472 ATA_CMD_READ_MULTI,
473 ATA_CMD_WRITE_MULTI,
474 ATA_CMD_READ_MULTI_EXT,
475 ATA_CMD_WRITE_MULTI_EXT,
476 0,
477 0,
478 0,
479 ATA_CMD_WRITE_MULTI_FUA_EXT,
480 /* pio */
481 ATA_CMD_PIO_READ,
482 ATA_CMD_PIO_WRITE,
483 ATA_CMD_PIO_READ_EXT,
484 ATA_CMD_PIO_WRITE_EXT,
485 0,
486 0,
487 0,
488 0,
489 /* dma */
490 ATA_CMD_READ,
491 ATA_CMD_WRITE,
492 ATA_CMD_READ_EXT,
493 ATA_CMD_WRITE_EXT,
494 0,
495 0,
496 0,
497 ATA_CMD_WRITE_FUA_EXT
498 };
499
500 /**
501 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
502 * @tf: command to examine and configure
503 * @dev: device tf belongs to
504 *
505 * Examine the device configuration and tf->flags to calculate
506 * the proper read/write commands and protocol to use.
507 *
508 * LOCKING:
509 * caller.
510 */
511 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
512 {
513 u8 cmd;
514
515 int index, fua, lba48, write;
516
517 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
518 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
519 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
520
521 if (dev->flags & ATA_DFLAG_PIO) {
522 tf->protocol = ATA_PROT_PIO;
523 index = dev->multi_count ? 0 : 8;
524 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
525 /* Unable to use DMA due to host limitation */
526 tf->protocol = ATA_PROT_PIO;
527 index = dev->multi_count ? 0 : 8;
528 } else {
529 tf->protocol = ATA_PROT_DMA;
530 index = 16;
531 }
532
533 cmd = ata_rw_cmds[index + fua + lba48 + write];
534 if (cmd) {
535 tf->command = cmd;
536 return 0;
537 }
538 return -1;
539 }
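/*
 * Worked example of the table lookup above: an LBA48 FUA write on a
 * DMA-capable device gives fua = 4, lba48 = 2, write = 1 and
 * index = 16, so ata_rw_cmds[16 + 4 + 2 + 1] selects
 * ATA_CMD_WRITE_FUA_EXT.  A zero entry means the combination is
 * invalid and ata_rwcmd_protocol() returns -1.
 */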
540
541 /**
542 * ata_tf_read_block - Read block address from ATA taskfile
543 * @tf: ATA taskfile of interest
544 * @dev: ATA device @tf belongs to
545 *
546 * LOCKING:
547 * None.
548 *
549 * Read block address from @tf. This function can handle all
550 * three address formats - LBA, LBA48 and CHS. tf->protocol and
551 * flags select the address format to use.
552 *
553 * RETURNS:
554 * Block address read from @tf.
555 */
556 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
557 {
558 u64 block = 0;
559
560 if (tf->flags & ATA_TFLAG_LBA) {
561 if (tf->flags & ATA_TFLAG_LBA48) {
562 block |= (u64)tf->hob_lbah << 40;
563 block |= (u64)tf->hob_lbam << 32;
564 block |= tf->hob_lbal << 24;
565 } else
566 block |= (tf->device & 0xf) << 24;
567
568 block |= tf->lbah << 16;
569 block |= tf->lbam << 8;
570 block |= tf->lbal;
571 } else {
572 u32 cyl, head, sect;
573
574 cyl = tf->lbam | (tf->lbah << 8);
575 head = tf->device & 0xf;
576 sect = tf->lbal;
577
578 block = (cyl * dev->heads + head) * dev->sectors + sect;
579 }
580
581 return block;
582 }
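/*
 * CHS sketch mirroring the math above: with dev->heads == 16 and
 * dev->sectors == 63, a taskfile holding cyl 1, head 2, sect 3
 * decodes to (1 * 16 + 2) * 63 + 3 = 1137.
 */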
583
584 /**
585 * ata_build_rw_tf - Build ATA taskfile for given read/write request
586 * @tf: Target ATA taskfile
587 * @dev: ATA device @tf belongs to
588 * @block: Block address
589 * @n_block: Number of blocks
590 * @tf_flags: RW/FUA etc...
591 * @tag: tag
592 *
593 * LOCKING:
594 * None.
595 *
596 * Build ATA taskfile @tf for read/write request described by
597 * @block, @n_block, @tf_flags and @tag on @dev.
598 *
599 * RETURNS:
600 *
601 * 0 on success, -ERANGE if the request is too large for @dev,
602 * -EINVAL if the request is invalid.
603 */
604 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
605 u64 block, u32 n_block, unsigned int tf_flags,
606 unsigned int tag)
607 {
608 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
609 tf->flags |= tf_flags;
610
611 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
612 /* yay, NCQ */
613 if (!lba_48_ok(block, n_block))
614 return -ERANGE;
615
616 tf->protocol = ATA_PROT_NCQ;
617 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
618
619 if (tf->flags & ATA_TFLAG_WRITE)
620 tf->command = ATA_CMD_FPDMA_WRITE;
621 else
622 tf->command = ATA_CMD_FPDMA_READ;
623
624 tf->nsect = tag << 3;
625 tf->hob_feature = (n_block >> 8) & 0xff;
626 tf->feature = n_block & 0xff;
627
628 tf->hob_lbah = (block >> 40) & 0xff;
629 tf->hob_lbam = (block >> 32) & 0xff;
630 tf->hob_lbal = (block >> 24) & 0xff;
631 tf->lbah = (block >> 16) & 0xff;
632 tf->lbam = (block >> 8) & 0xff;
633 tf->lbal = block & 0xff;
634
635 tf->device = 1 << 6;
636 if (tf->flags & ATA_TFLAG_FUA)
637 tf->device |= 1 << 7;
638 } else if (dev->flags & ATA_DFLAG_LBA) {
639 tf->flags |= ATA_TFLAG_LBA;
640
641 if (lba_28_ok(block, n_block)) {
642 /* use LBA28 */
643 tf->device |= (block >> 24) & 0xf;
644 } else if (lba_48_ok(block, n_block)) {
645 if (!(dev->flags & ATA_DFLAG_LBA48))
646 return -ERANGE;
647
648 /* use LBA48 */
649 tf->flags |= ATA_TFLAG_LBA48;
650
651 tf->hob_nsect = (n_block >> 8) & 0xff;
652
653 tf->hob_lbah = (block >> 40) & 0xff;
654 tf->hob_lbam = (block >> 32) & 0xff;
655 tf->hob_lbal = (block >> 24) & 0xff;
656 } else
657 /* request too large even for LBA48 */
658 return -ERANGE;
659
660 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
661 return -EINVAL;
662
663 tf->nsect = n_block & 0xff;
664
665 tf->lbah = (block >> 16) & 0xff;
666 tf->lbam = (block >> 8) & 0xff;
667 tf->lbal = block & 0xff;
668
669 tf->device |= ATA_LBA;
670 } else {
671 /* CHS */
672 u32 sect, head, cyl, track;
673
674 /* The request -may- be too large for CHS addressing. */
675 if (!lba_28_ok(block, n_block))
676 return -ERANGE;
677
678 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
679 return -EINVAL;
680
681 /* Convert LBA to CHS */
682 track = (u32)block / dev->sectors;
683 cyl = track / dev->heads;
684 head = track % dev->heads;
685 sect = (u32)block % dev->sectors + 1;
686
687 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
688 (u32)block, track, cyl, head, sect);
689
690 /* Check whether the converted CHS can fit.
691 Cylinder: 0-65535
692 Head: 0-15
693 		   Sector: 1-255 */
694 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
695 return -ERANGE;
696
697 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
698 tf->lbal = sect;
699 tf->lbam = cyl;
700 tf->lbah = cyl >> 8;
701 tf->device |= head;
702 }
703
704 return 0;
705 }
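/*
 * Usage sketch (hypothetical values): build a non-NCQ taskfile for a
 * 16-sector write starting at block 0x12345678.
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
 *			    ATA_TFLAG_WRITE, ATA_TAG_INTERNAL))
 *		return;		... -ERANGE or -EINVAL ...
 *
 * Passing ATA_TAG_INTERNAL as @tag steers the code away from the NCQ
 * branch above.
 */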
706
707 /**
708 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
709 * @pio_mask: pio_mask
710 * @mwdma_mask: mwdma_mask
711 * @udma_mask: udma_mask
712 *
713 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
714 * unsigned int xfer_mask.
715 *
716 * LOCKING:
717 * None.
718 *
719 * RETURNS:
720 * Packed xfer_mask.
721 */
722 unsigned long ata_pack_xfermask(unsigned long pio_mask,
723 unsigned long mwdma_mask,
724 unsigned long udma_mask)
725 {
726 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
727 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
728 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
729 }
730
731 /**
732 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
733 * @xfer_mask: xfer_mask to unpack
734 * @pio_mask: resulting pio_mask
735 * @mwdma_mask: resulting mwdma_mask
736 * @udma_mask: resulting udma_mask
737 *
738 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
739  *	Any NULL destination masks will be ignored.
740 */
741 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
742 unsigned long *mwdma_mask, unsigned long *udma_mask)
743 {
744 if (pio_mask)
745 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
746 if (mwdma_mask)
747 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
748 if (udma_mask)
749 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
750 }
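/*
 * Round-trip sketch: packing PIO0-4 and MWDMA0-2 and unpacking the
 * result recovers the original masks.
 *
 *	unsigned long xfer_mask, pio, mwdma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, NULL);
 *
 * Here pio ends up 0x1f and mwdma 0x07, while the NULL udma_mask
 * pointer is simply skipped as described above.
 */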
751
752 static const struct ata_xfer_ent {
753 int shift, bits;
754 u8 base;
755 } ata_xfer_tbl[] = {
756 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
757 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
758 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
759 { -1, },
760 };
761
762 /**
763 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
764 * @xfer_mask: xfer_mask of interest
765 *
766 * Return matching XFER_* value for @xfer_mask. Only the highest
767 * bit of @xfer_mask is considered.
768 *
769 * LOCKING:
770 * None.
771 *
772 * RETURNS:
773 * Matching XFER_* value, 0xff if no match found.
774 */
775 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
776 {
777 int highbit = fls(xfer_mask) - 1;
778 const struct ata_xfer_ent *ent;
779
780 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
781 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
782 return ent->base + highbit - ent->shift;
783 return 0xff;
784 }
785
786 /**
787 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
788 * @xfer_mode: XFER_* of interest
789 *
790 * Return matching xfer_mask for @xfer_mode.
791 *
792 * LOCKING:
793 * None.
794 *
795 * RETURNS:
796 * Matching xfer_mask, 0 if no match found.
797 */
798 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
799 {
800 const struct ata_xfer_ent *ent;
801
802 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
803 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
804 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
805 & ~((1 << ent->shift) - 1);
806 return 0;
807 }
808
809 /**
810 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
811 * @xfer_mode: XFER_* of interest
812 *
813 * Return matching xfer_shift for @xfer_mode.
814 *
815 * LOCKING:
816 * None.
817 *
818 * RETURNS:
819 * Matching xfer_shift, -1 if no match found.
820 */
821 int ata_xfer_mode2shift(unsigned long xfer_mode)
822 {
823 const struct ata_xfer_ent *ent;
824
825 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
826 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
827 return ent->shift;
828 return -1;
829 }
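/*
 * Conversion sketch tying the three helpers together: for a mask of
 * PIO0-4, ata_xfer_mask2mode(0x1f << ATA_SHIFT_PIO) yields XFER_PIO_4
 * (only the highest bit counts), ata_xfer_mode2mask(XFER_PIO_4) gives
 * back 0x1f << ATA_SHIFT_PIO (that mode and everything below it), and
 * ata_xfer_mode2shift(XFER_PIO_4) returns ATA_SHIFT_PIO.
 */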
830
831 /**
832 * ata_mode_string - convert xfer_mask to string
833 * @xfer_mask: mask of bits supported; only highest bit counts.
834 *
835 * Determine string which represents the highest speed
836  *	(highest bit in @xfer_mask).
837 *
838 * LOCKING:
839 * None.
840 *
841 * RETURNS:
842 * Constant C string representing highest speed listed in
843  *	@xfer_mask, or the constant C string "<n/a>".
844 */
845 const char *ata_mode_string(unsigned long xfer_mask)
846 {
847 static const char * const xfer_mode_str[] = {
848 "PIO0",
849 "PIO1",
850 "PIO2",
851 "PIO3",
852 "PIO4",
853 "PIO5",
854 "PIO6",
855 "MWDMA0",
856 "MWDMA1",
857 "MWDMA2",
858 "MWDMA3",
859 "MWDMA4",
860 "UDMA/16",
861 "UDMA/25",
862 "UDMA/33",
863 "UDMA/44",
864 "UDMA/66",
865 "UDMA/100",
866 "UDMA/133",
867 "UDMA7",
868 };
869 int highbit;
870
871 highbit = fls(xfer_mask) - 1;
872 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
873 return xfer_mode_str[highbit];
874 return "<n/a>";
875 }
876
877 static const char *sata_spd_string(unsigned int spd)
878 {
879 static const char * const spd_str[] = {
880 "1.5 Gbps",
881 "3.0 Gbps",
882 };
883
884 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
885 return "<unknown>";
886 return spd_str[spd - 1];
887 }
888
889 void ata_dev_disable(struct ata_device *dev)
890 {
891 if (ata_dev_enabled(dev)) {
892 if (ata_msg_drv(dev->link->ap))
893 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
894 ata_acpi_on_disable(dev);
895 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
896 ATA_DNXFER_QUIET);
897 dev->class++;
898 }
899 }
900
901 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
902 {
903 struct ata_link *link = dev->link;
904 struct ata_port *ap = link->ap;
905 u32 scontrol;
906 unsigned int err_mask;
907 int rc;
908
909 /*
910 * disallow DIPM for drivers which haven't set
911 * ATA_FLAG_IPM. This is because when DIPM is enabled,
912 * phy ready will be set in the interrupt status on
913 * state changes, which will cause some drivers to
914 * think there are errors - additionally drivers will
915 * need to disable hot plug.
916 */
917 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
918 ap->pm_policy = NOT_AVAILABLE;
919 return -EINVAL;
920 }
921
922 /*
923 * For DIPM, we will only enable it for the
924 * min_power setting.
925 *
926 	 * Why?  Because disks are too stupid to know that
927 	 * if the host rejects a request to go to SLUMBER
928 	 * they should retry at PARTIAL; instead they
929 	 * just give up.  So, for medium_power to
930 * work at all, we need to only allow HIPM.
931 */
932 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
933 if (rc)
934 return rc;
935
936 switch (policy) {
937 case MIN_POWER:
938 /* no restrictions on IPM transitions */
939 scontrol &= ~(0x3 << 8);
940 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
941 if (rc)
942 return rc;
943
944 /* enable DIPM */
945 if (dev->flags & ATA_DFLAG_DIPM)
946 err_mask = ata_dev_set_feature(dev,
947 SETFEATURES_SATA_ENABLE, SATA_DIPM);
948 break;
949 case MEDIUM_POWER:
950 /* allow IPM to PARTIAL */
951 scontrol &= ~(0x1 << 8);
952 scontrol |= (0x2 << 8);
953 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
954 if (rc)
955 return rc;
956
957 /*
958 * we don't have to disable DIPM since IPM flags
959 * disallow transitions to SLUMBER, which effectively
960 * disable DIPM if it does not support PARTIAL
961 */
962 break;
963 case NOT_AVAILABLE:
964 case MAX_PERFORMANCE:
965 /* disable all IPM transitions */
966 scontrol |= (0x3 << 8);
967 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
968 if (rc)
969 return rc;
970
971 /*
972 * we don't have to disable DIPM since IPM flags
973 * disallow all transitions which effectively
974 * disable DIPM anyway.
975 */
976 break;
977 }
978
979 /* FIXME: handle SET FEATURES failure */
980 (void) err_mask;
981
982 return 0;
983 }
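/*
 * For reference, the (0x3 << 8) manipulation above targets the IPM
 * field of SControl (bits 11:8): 0x1 disallows transitions to
 * PARTIAL, 0x2 disallows transitions to SLUMBER, 0x3 disallows both
 * and 0x0 allows all interface power management transitions.
 */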
984
985 /**
986 * ata_dev_enable_pm - enable SATA interface power management
987 * @dev: device to enable power management
988 * @policy: the link power management policy
989 *
990 * Enable SATA Interface power management. This will enable
991 * Device Interface Power Management (DIPM) for min_power
992 * policy, and then call driver specific callbacks for
993 * enabling Host Initiated Power management.
994 *
995 * Locking: Caller.
996  *	Returns: nothing; on failure, ap->pm_policy falls back to
997  *	MAX_PERFORMANCE.
997 */
998 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
999 {
1000 int rc = 0;
1001 struct ata_port *ap = dev->link->ap;
1002
1003 /* set HIPM first, then DIPM */
1004 if (ap->ops->enable_pm)
1005 rc = ap->ops->enable_pm(ap, policy);
1006 if (rc)
1007 goto enable_pm_out;
1008 rc = ata_dev_set_dipm(dev, policy);
1009
1010 enable_pm_out:
1011 if (rc)
1012 ap->pm_policy = MAX_PERFORMANCE;
1013 else
1014 ap->pm_policy = policy;
1015 return /* rc */; /* hopefully we can use 'rc' eventually */
1016 }
1017
1018 #ifdef CONFIG_PM
1019 /**
1020 * ata_dev_disable_pm - disable SATA interface power management
1021 * @dev: device to disable power management
1022 *
1023 * Disable SATA Interface power management. This will disable
1024 * Device Interface Power Management (DIPM) without changing
1025 * policy, call driver specific callbacks for disabling Host
1026 * Initiated Power management.
1027 *
1028 * Locking: Caller.
1029 * Returns: void
1030 */
1031 static void ata_dev_disable_pm(struct ata_device *dev)
1032 {
1033 struct ata_port *ap = dev->link->ap;
1034
1035 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1036 if (ap->ops->disable_pm)
1037 ap->ops->disable_pm(ap);
1038 }
1039 #endif /* CONFIG_PM */
1040
1041 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1042 {
1043 ap->pm_policy = policy;
1044 ap->link.eh_info.action |= ATA_EH_LPM;
1045 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1046 ata_port_schedule_eh(ap);
1047 }
1048
1049 #ifdef CONFIG_PM
1050 static void ata_lpm_enable(struct ata_host *host)
1051 {
1052 struct ata_link *link;
1053 struct ata_port *ap;
1054 struct ata_device *dev;
1055 int i;
1056
1057 for (i = 0; i < host->n_ports; i++) {
1058 ap = host->ports[i];
1059 ata_port_for_each_link(link, ap) {
1060 ata_link_for_each_dev(dev, link)
1061 ata_dev_disable_pm(dev);
1062 }
1063 }
1064 }
1065
1066 static void ata_lpm_disable(struct ata_host *host)
1067 {
1068 int i;
1069
1070 for (i = 0; i < host->n_ports; i++) {
1071 struct ata_port *ap = host->ports[i];
1072 ata_lpm_schedule(ap, ap->pm_policy);
1073 }
1074 }
1075 #endif /* CONFIG_PM */
1076
1077 /**
1078 * ata_dev_classify - determine device type based on ATA-spec signature
1079 * @tf: ATA taskfile register set for device to be identified
1080 *
1081 * Determine from taskfile register contents whether a device is
1082 * ATA or ATAPI, as per "Signature and persistence" section
1083 * of ATA/PI spec (volume 1, sect 5.14).
1084 *
1085 * LOCKING:
1086 * None.
1087 *
1088 * RETURNS:
1089 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1090  *	%ATA_DEV_UNKNOWN in the event of failure.
1091 */
1092 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1093 {
1094 /* Apple's open source Darwin code hints that some devices only
1095 * put a proper signature into the LBA mid/high registers,
1096 	 * so we only check those.  It's sufficient for uniqueness.
1097 *
1098 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1099 * signatures for ATA and ATAPI devices attached on SerialATA,
1100 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1101 	 * spec has never mentioned using different signatures
1102 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1103 	 * Multiplier specification began to use 0x69/0x96 to identify
1104 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1105 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1106 	 * 0x69/0x96 and described them as reserved for
1107 	 * SerialATA.
1108 *
1109 * We follow the current spec and consider that 0x69/0x96
1110 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1111 */
1112 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1113 DPRINTK("found ATA device by sig\n");
1114 return ATA_DEV_ATA;
1115 }
1116
1117 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1118 DPRINTK("found ATAPI device by sig\n");
1119 return ATA_DEV_ATAPI;
1120 }
1121
1122 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1123 DPRINTK("found PMP device by sig\n");
1124 return ATA_DEV_PMP;
1125 }
1126
1127 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1128 printk(KERN_INFO "ata: SEMB device ignored\n");
1129 return ATA_DEV_SEMB_UNSUP; /* not yet */
1130 }
1131
1132 DPRINTK("unknown device\n");
1133 return ATA_DEV_UNKNOWN;
1134 }
1135
1136 /**
1137 * ata_id_string - Convert IDENTIFY DEVICE page into string
1138 * @id: IDENTIFY DEVICE results we will examine
1139 * @s: string into which data is output
1140 * @ofs: offset into identify device page
1141 * @len: length of string to return. must be an even number.
1142 *
1143 * The strings in the IDENTIFY DEVICE page are broken up into
1144 * 16-bit chunks. Run through the string, and output each
1145 * 8-bit chunk linearly, regardless of platform.
1146 *
1147 * LOCKING:
1148 * caller.
1149 */
1150
1151 void ata_id_string(const u16 *id, unsigned char *s,
1152 unsigned int ofs, unsigned int len)
1153 {
1154 unsigned int c;
1155
1156 while (len > 0) {
1157 c = id[ofs] >> 8;
1158 *s = c;
1159 s++;
1160
1161 c = id[ofs] & 0xff;
1162 *s = c;
1163 s++;
1164
1165 ofs++;
1166 len -= 2;
1167 }
1168 }
1169
1170 /**
1171 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1172 * @id: IDENTIFY DEVICE results we will examine
1173 * @s: string into which data is output
1174 * @ofs: offset into identify device page
1175 * @len: length of string to return. must be an odd number.
1176 *
1177 * This function is identical to ata_id_string except that it
1178 * trims trailing spaces and terminates the resulting string with
1179 * null. @len must be actual maximum length (even number) + 1.
1180 *
1181 * LOCKING:
1182 * caller.
1183 */
1184 void ata_id_c_string(const u16 *id, unsigned char *s,
1185 unsigned int ofs, unsigned int len)
1186 {
1187 unsigned char *p;
1188
1189 WARN_ON(!(len & 1));
1190
1191 ata_id_string(id, s, ofs, len - 1);
1192
1193 p = s + strnlen(s, len - 1);
1194 while (p > s && p[-1] == ' ')
1195 p--;
1196 *p = '\0';
1197 }
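/*
 * Typical use, matching what ata_dev_configure() does elsewhere in
 * libata: extract the model string from IDENTIFY data as a trimmed,
 * NUL-terminated C string.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * sizeof(model) is 41, the odd "maximum length + 1" the comment above
 * requires.
 */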
1198
1199 static u64 ata_id_n_sectors(const u16 *id)
1200 {
1201 if (ata_id_has_lba(id)) {
1202 if (ata_id_has_lba48(id))
1203 return ata_id_u64(id, 100);
1204 else
1205 return ata_id_u32(id, 60);
1206 } else {
1207 if (ata_id_current_chs_valid(id))
1208 return ata_id_u32(id, 57);
1209 else
1210 return id[1] * id[3] * id[6];
1211 }
1212 }
1213
1214 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1215 {
1216 u64 sectors = 0;
1217
1218 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1219 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1220 sectors |= (tf->hob_lbal & 0xff) << 24;
1221 sectors |= (tf->lbah & 0xff) << 16;
1222 sectors |= (tf->lbam & 0xff) << 8;
1223 sectors |= (tf->lbal & 0xff);
1224
1225 return sectors;
1226 }
1227
1228 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1229 {
1230 u64 sectors = 0;
1231
1232 sectors |= (tf->device & 0x0f) << 24;
1233 sectors |= (tf->lbah & 0xff) << 16;
1234 sectors |= (tf->lbam & 0xff) << 8;
1235 sectors |= (tf->lbal & 0xff);
1236
1237 return sectors;
1238 }
1239
1240 /**
1241 * ata_read_native_max_address - Read native max address
1242 * @dev: target device
1243 * @max_sectors: out parameter for the result native max address
1244 *
1245 * Perform an LBA48 or LBA28 native size query upon the device in
1246 * question.
1247 *
1248 * RETURNS:
1249 * 0 on success, -EACCES if command is aborted by the drive.
1250 * -EIO on other errors.
1251 */
1252 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1253 {
1254 unsigned int err_mask;
1255 struct ata_taskfile tf;
1256 int lba48 = ata_id_has_lba48(dev->id);
1257
1258 ata_tf_init(dev, &tf);
1259
1260 /* always clear all address registers */
1261 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1262
1263 if (lba48) {
1264 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1265 tf.flags |= ATA_TFLAG_LBA48;
1266 } else
1267 tf.command = ATA_CMD_READ_NATIVE_MAX;
1268
1269 tf.protocol |= ATA_PROT_NODATA;
1270 tf.device |= ATA_LBA;
1271
1272 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1273 if (err_mask) {
1274 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1275 "max address (err_mask=0x%x)\n", err_mask);
1276 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1277 return -EACCES;
1278 return -EIO;
1279 }
1280
1281 if (lba48)
1282 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1283 else
1284 *max_sectors = ata_tf_to_lba(&tf) + 1;
1285 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1286 (*max_sectors)--;
1287 return 0;
1288 }
1289
1290 /**
1291 * ata_set_max_sectors - Set max sectors
1292 * @dev: target device
1293 * @new_sectors: new max sectors value to set for the device
1294 *
1295 * Set max sectors of @dev to @new_sectors.
1296 *
1297 * RETURNS:
1298 * 0 on success, -EACCES if command is aborted or denied (due to
1299 * previous non-volatile SET_MAX) by the drive. -EIO on other
1300 * errors.
1301 */
1302 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1303 {
1304 unsigned int err_mask;
1305 struct ata_taskfile tf;
1306 int lba48 = ata_id_has_lba48(dev->id);
1307
1308 new_sectors--;
1309
1310 ata_tf_init(dev, &tf);
1311
1312 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1313
1314 if (lba48) {
1315 tf.command = ATA_CMD_SET_MAX_EXT;
1316 tf.flags |= ATA_TFLAG_LBA48;
1317
1318 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1319 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1320 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1321 } else {
1322 tf.command = ATA_CMD_SET_MAX;
1323
1324 tf.device |= (new_sectors >> 24) & 0xf;
1325 }
1326
1327 tf.protocol |= ATA_PROT_NODATA;
1328 tf.device |= ATA_LBA;
1329
1330 tf.lbal = (new_sectors >> 0) & 0xff;
1331 tf.lbam = (new_sectors >> 8) & 0xff;
1332 tf.lbah = (new_sectors >> 16) & 0xff;
1333
1334 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1335 if (err_mask) {
1336 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1337 "max address (err_mask=0x%x)\n", err_mask);
1338 if (err_mask == AC_ERR_DEV &&
1339 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1340 return -EACCES;
1341 return -EIO;
1342 }
1343
1344 return 0;
1345 }
1346
1347 /**
1348 * ata_hpa_resize - Resize a device with an HPA set
1349 * @dev: Device to resize
1350 *
1351 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1352 * it if required to the full size of the media. The caller must check
1353 * the drive has the HPA feature set enabled.
1354 *
1355 * RETURNS:
1356 * 0 on success, -errno on failure.
1357 */
1358 static int ata_hpa_resize(struct ata_device *dev)
1359 {
1360 struct ata_eh_context *ehc = &dev->link->eh_context;
1361 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1362 u64 sectors = ata_id_n_sectors(dev->id);
1363 u64 native_sectors;
1364 int rc;
1365
1366 /* do we need to do it? */
1367 if (dev->class != ATA_DEV_ATA ||
1368 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1369 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1370 return 0;
1371
1372 /* read native max address */
1373 rc = ata_read_native_max_address(dev, &native_sectors);
1374 if (rc) {
1375 /* If device aborted the command or HPA isn't going to
1376 * be unlocked, skip HPA resizing.
1377 */
1378 if (rc == -EACCES || !ata_ignore_hpa) {
1379 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1380 "broken, skipping HPA handling\n");
1381 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1382
1383 /* we can continue if device aborted the command */
1384 if (rc == -EACCES)
1385 rc = 0;
1386 }
1387
1388 return rc;
1389 }
1390
1391 /* nothing to do? */
1392 if (native_sectors <= sectors || !ata_ignore_hpa) {
1393 if (!print_info || native_sectors == sectors)
1394 return 0;
1395
1396 if (native_sectors > sectors)
1397 ata_dev_printk(dev, KERN_INFO,
1398 "HPA detected: current %llu, native %llu\n",
1399 (unsigned long long)sectors,
1400 (unsigned long long)native_sectors);
1401 else if (native_sectors < sectors)
1402 ata_dev_printk(dev, KERN_WARNING,
1403 "native sectors (%llu) is smaller than "
1404 "sectors (%llu)\n",
1405 (unsigned long long)native_sectors,
1406 (unsigned long long)sectors);
1407 return 0;
1408 }
1409
1410 /* let's unlock HPA */
1411 rc = ata_set_max_sectors(dev, native_sectors);
1412 if (rc == -EACCES) {
1413 /* if device aborted the command, skip HPA resizing */
1414 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1415 "(%llu -> %llu), skipping HPA handling\n",
1416 (unsigned long long)sectors,
1417 (unsigned long long)native_sectors);
1418 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1419 return 0;
1420 } else if (rc)
1421 return rc;
1422
1423 /* re-read IDENTIFY data */
1424 rc = ata_dev_reread_id(dev, 0);
1425 if (rc) {
1426 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1427 "data after HPA resizing\n");
1428 return rc;
1429 }
1430
1431 if (print_info) {
1432 u64 new_sectors = ata_id_n_sectors(dev->id);
1433 ata_dev_printk(dev, KERN_INFO,
1434 "HPA unlocked: %llu -> %llu, native %llu\n",
1435 (unsigned long long)sectors,
1436 (unsigned long long)new_sectors,
1437 (unsigned long long)native_sectors);
1438 }
1439
1440 return 0;
1441 }
1442
1443 /**
1444 * ata_noop_dev_select - Select device 0/1 on ATA bus
1445 * @ap: ATA channel to manipulate
1446 * @device: ATA device (numbered from zero) to select
1447 *
1448  *	This function intentionally performs no action.
1449 *
1450 * May be used as the dev_select() entry in ata_port_operations.
1451 *
1452 * LOCKING:
1453 * caller.
1454 */
1455 void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1456 {
1457 }
1458
1459 /**
1460 * ata_dump_id - IDENTIFY DEVICE info debugging output
1461 * @id: IDENTIFY DEVICE page to dump
1462 *
1463 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1464 * page.
1465 *
1466 * LOCKING:
1467 * caller.
1468 */
1469
1470 static inline void ata_dump_id(const u16 *id)
1471 {
1472 DPRINTK("49==0x%04x "
1473 "53==0x%04x "
1474 "63==0x%04x "
1475 "64==0x%04x "
1476 "75==0x%04x \n",
1477 id[49],
1478 id[53],
1479 id[63],
1480 id[64],
1481 id[75]);
1482 DPRINTK("80==0x%04x "
1483 "81==0x%04x "
1484 "82==0x%04x "
1485 "83==0x%04x "
1486 "84==0x%04x \n",
1487 id[80],
1488 id[81],
1489 id[82],
1490 id[83],
1491 id[84]);
1492 DPRINTK("88==0x%04x "
1493 "93==0x%04x\n",
1494 id[88],
1495 id[93]);
1496 }
1497
1498 /**
1499 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1500 * @id: IDENTIFY data to compute xfer mask from
1501 *
1502 * Compute the xfermask for this device. This is not as trivial
1503 * as it seems if we must consider early devices correctly.
1504 *
1505 * FIXME: pre IDE drive timing (do we care ?).
1506 *
1507 * LOCKING:
1508 * None.
1509 *
1510 * RETURNS:
1511 * Computed xfermask
1512 */
1513 unsigned long ata_id_xfermask(const u16 *id)
1514 {
1515 unsigned long pio_mask, mwdma_mask, udma_mask;
1516
1517 /* Usual case. Word 53 indicates word 64 is valid */
1518 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1519 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1520 pio_mask <<= 3;
1521 pio_mask |= 0x7;
1522 } else {
1523 /* If word 64 isn't valid then Word 51 high byte holds
1524 * the PIO timing number for the maximum. Turn it into
1525 * a mask.
1526 */
1527 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1528 if (mode < 5) /* Valid PIO range */
1529 pio_mask = (2 << mode) - 1;
1530 else
1531 pio_mask = 1;
1532
1533 /* But wait.. there's more. Design your standards by
1534 * committee and you too can get a free iordy field to
1535 		 * process.  However it's the speeds, not the modes, that
1536 * are supported... Note drivers using the timing API
1537 * will get this right anyway
1538 */
1539 }
1540
1541 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1542
1543 if (ata_id_is_cfa(id)) {
1544 /*
1545 * Process compact flash extended modes
1546 */
1547 int pio = id[163] & 0x7;
1548 int dma = (id[163] >> 3) & 7;
1549
1550 if (pio)
1551 pio_mask |= (1 << 5);
1552 if (pio > 1)
1553 pio_mask |= (1 << 6);
1554 if (dma)
1555 mwdma_mask |= (1 << 3);
1556 if (dma > 1)
1557 mwdma_mask |= (1 << 4);
1558 }
1559
1560 udma_mask = 0;
1561 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1562 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1563
1564 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1565 }
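/*
 * Sketch combining this with the helpers above: report the best mode
 * a device claims in its IDENTIFY data.
 *
 *	unsigned long xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_dev_printk(dev, KERN_DEBUG, "best mode: %s\n",
 *		       ata_mode_string(xfer_mask));
 */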
1566
1567 /**
1568 * ata_pio_queue_task - Queue port_task
1569 * @ap: The ata_port to queue port_task for
1570 * @fn: workqueue function to be scheduled
1571 * @data: data for @fn to use
1572 * @delay: delay time for workqueue function
1573 *
1574 * Schedule @fn(@data) for execution after @delay jiffies using
1575  *	port_task.  There is one port_task per port, and it is the
1576  *	user's (low level driver's) responsibility to make sure that only
1577  *	one task is active at any given time.
1578 *
1579 * libata core layer takes care of synchronization between
1580 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1581 * synchronization.
1582 *
1583 * LOCKING:
1584 * Inherited from caller.
1585 */
1586 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1587 {
1588 ap->port_task_data = data;
1589
1590 /* may fail if ata_port_flush_task() in progress */
1591 queue_delayed_work(ata_wq, &ap->port_task, delay);
1592 }
1593
1594 /**
1595 * ata_port_flush_task - Flush port_task
1596 * @ap: The ata_port to flush port_task for
1597 *
1598  *	After this function completes, port_task is guaranteed not to
1599 * be running or scheduled.
1600 *
1601 * LOCKING:
1602 * Kernel thread context (may sleep)
1603 */
1604 void ata_port_flush_task(struct ata_port *ap)
1605 {
1606 DPRINTK("ENTER\n");
1607
1608 cancel_rearming_delayed_work(&ap->port_task);
1609
1610 if (ata_msg_ctl(ap))
1611 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1612 }
1613
1614 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1615 {
1616 struct completion *waiting = qc->private_data;
1617
1618 complete(waiting);
1619 }
1620
1621 /**
1622 * ata_exec_internal_sg - execute libata internal command
1623 * @dev: Device to which the command is sent
1624 * @tf: Taskfile registers for the command and the result
1625 * @cdb: CDB for packet command
1626  *	@dma_dir: Data transfer direction of the command
1627 * @sgl: sg list for the data buffer of the command
1628 * @n_elem: Number of sg entries
1629 * @timeout: Timeout in msecs (0 for default)
1630 *
1631 * Executes libata internal command with timeout. @tf contains
1632 * command on entry and result on return. Timeout and error
1633 * conditions are reported via return value. No recovery action
1634 * is taken after a command times out. It's caller's duty to
1635 * clean up after timeout.
1636 *
1637 * LOCKING:
1638 * None. Should be called with kernel context, might sleep.
1639 *
1640 * RETURNS:
1641 * Zero on success, AC_ERR_* mask on failure
1642 */
1643 unsigned ata_exec_internal_sg(struct ata_device *dev,
1644 struct ata_taskfile *tf, const u8 *cdb,
1645 int dma_dir, struct scatterlist *sgl,
1646 unsigned int n_elem, unsigned long timeout)
1647 {
1648 struct ata_link *link = dev->link;
1649 struct ata_port *ap = link->ap;
1650 u8 command = tf->command;
1651 struct ata_queued_cmd *qc;
1652 unsigned int tag, preempted_tag;
1653 u32 preempted_sactive, preempted_qc_active;
1654 int preempted_nr_active_links;
1655 DECLARE_COMPLETION_ONSTACK(wait);
1656 unsigned long flags;
1657 unsigned int err_mask;
1658 int rc;
1659
1660 spin_lock_irqsave(ap->lock, flags);
1661
1662 /* no internal command while frozen */
1663 if (ap->pflags & ATA_PFLAG_FROZEN) {
1664 spin_unlock_irqrestore(ap->lock, flags);
1665 return AC_ERR_SYSTEM;
1666 }
1667
1668 /* initialize internal qc */
1669
1670 /* XXX: Tag 0 is used for drivers with legacy EH as some
1671 * drivers choke if any other tag is given. This breaks
1672 * ata_tag_internal() test for those drivers. Don't use new
1673 * EH stuff without converting to it.
1674 */
1675 if (ap->ops->error_handler)
1676 tag = ATA_TAG_INTERNAL;
1677 else
1678 tag = 0;
1679
1680 if (test_and_set_bit(tag, &ap->qc_allocated))
1681 BUG();
1682 qc = __ata_qc_from_tag(ap, tag);
1683
1684 qc->tag = tag;
1685 qc->scsicmd = NULL;
1686 qc->ap = ap;
1687 qc->dev = dev;
1688 ata_qc_reinit(qc);
1689
1690 preempted_tag = link->active_tag;
1691 preempted_sactive = link->sactive;
1692 preempted_qc_active = ap->qc_active;
1693 preempted_nr_active_links = ap->nr_active_links;
1694 link->active_tag = ATA_TAG_POISON;
1695 link->sactive = 0;
1696 ap->qc_active = 0;
1697 ap->nr_active_links = 0;
1698
1699 /* prepare & issue qc */
1700 qc->tf = *tf;
1701 if (cdb)
1702 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1703 qc->flags |= ATA_QCFLAG_RESULT_TF;
1704 qc->dma_dir = dma_dir;
1705 if (dma_dir != DMA_NONE) {
1706 unsigned int i, buflen = 0;
1707 struct scatterlist *sg;
1708
1709 for_each_sg(sgl, sg, n_elem, i)
1710 buflen += sg->length;
1711
1712 ata_sg_init(qc, sgl, n_elem);
1713 qc->nbytes = buflen;
1714 }
1715
1716 qc->private_data = &wait;
1717 qc->complete_fn = ata_qc_complete_internal;
1718
1719 ata_qc_issue(qc);
1720
1721 spin_unlock_irqrestore(ap->lock, flags);
1722
1723 if (!timeout)
1724 timeout = ata_probe_timeout * 1000 / HZ;
1725
1726 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1727
1728 ata_port_flush_task(ap);
1729
1730 if (!rc) {
1731 spin_lock_irqsave(ap->lock, flags);
1732
1733 /* We're racing with irq here. If we lose, the
1734 * following test prevents us from completing the qc
1735 * twice. If we win, the port is frozen and will be
1736 * cleaned up by ->post_internal_cmd().
1737 */
1738 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1739 qc->err_mask |= AC_ERR_TIMEOUT;
1740
1741 if (ap->ops->error_handler)
1742 ata_port_freeze(ap);
1743 else
1744 ata_qc_complete(qc);
1745
1746 if (ata_msg_warn(ap))
1747 ata_dev_printk(dev, KERN_WARNING,
1748 "qc timeout (cmd 0x%x)\n", command);
1749 }
1750
1751 spin_unlock_irqrestore(ap->lock, flags);
1752 }
1753
1754 /* do post_internal_cmd */
1755 if (ap->ops->post_internal_cmd)
1756 ap->ops->post_internal_cmd(qc);
1757
1758 /* perform minimal error analysis */
1759 if (qc->flags & ATA_QCFLAG_FAILED) {
1760 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1761 qc->err_mask |= AC_ERR_DEV;
1762
1763 if (!qc->err_mask)
1764 qc->err_mask |= AC_ERR_OTHER;
1765
1766 if (qc->err_mask & ~AC_ERR_OTHER)
1767 qc->err_mask &= ~AC_ERR_OTHER;
1768 }
1769
1770 /* finish up */
1771 spin_lock_irqsave(ap->lock, flags);
1772
1773 *tf = qc->result_tf;
1774 err_mask = qc->err_mask;
1775
1776 ata_qc_free(qc);
1777 link->active_tag = preempted_tag;
1778 link->sactive = preempted_sactive;
1779 ap->qc_active = preempted_qc_active;
1780 ap->nr_active_links = preempted_nr_active_links;
1781
1782 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1783 * Until those drivers are fixed, we detect the condition
1784 * here, fail the command with AC_ERR_SYSTEM and reenable the
1785 * port.
1786 *
1787 * Note that this doesn't change any behavior as internal
1788 * command failure results in disabling the device in the
1789 * higher layer for LLDDs without new reset/EH callbacks.
1790 *
1791 * Kill the following code as soon as those drivers are fixed.
1792 */
1793 if (ap->flags & ATA_FLAG_DISABLED) {
1794 err_mask |= AC_ERR_SYSTEM;
1795 ata_port_probe(ap);
1796 }
1797
1798 spin_unlock_irqrestore(ap->lock, flags);
1799
1800 return err_mask;
1801 }
1802
1803 /**
1804 * ata_exec_internal - execute libata internal command
1805 * @dev: Device to which the command is sent
1806 * @tf: Taskfile registers for the command and the result
1807 * @cdb: CDB for packet command
1808  *	@dma_dir: Data transfer direction of the command
1809 * @buf: Data buffer of the command
1810 * @buflen: Length of data buffer
1811 * @timeout: Timeout in msecs (0 for default)
1812 *
1813 * Wrapper around ata_exec_internal_sg() which takes simple
1814 * buffer instead of sg list.
1815 *
1816 * LOCKING:
1817 * None. Should be called with kernel context, might sleep.
1818 *
1819 * RETURNS:
1820 * Zero on success, AC_ERR_* mask on failure
1821 */
1822 unsigned ata_exec_internal(struct ata_device *dev,
1823 struct ata_taskfile *tf, const u8 *cdb,
1824 int dma_dir, void *buf, unsigned int buflen,
1825 unsigned long timeout)
1826 {
1827 struct scatterlist *psg = NULL, sg;
1828 unsigned int n_elem = 0;
1829
1830 if (dma_dir != DMA_NONE) {
1831 WARN_ON(!buf);
1832 sg_init_one(&sg, buf, buflen);
1833 psg = &sg;
1834 n_elem++;
1835 }
1836
1837 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1838 timeout);
1839 }
1840
1841 /**
1842 * ata_do_simple_cmd - execute simple internal command
1843 * @dev: Device to which the command is sent
1844 * @cmd: Opcode to execute
1845 *
1846 * Execute a 'simple' command, that only consists of the opcode
1847 * 'cmd' itself, without filling any other registers
1848 *
1849 * LOCKING:
1850 * Kernel thread context (may sleep).
1851 *
1852 * RETURNS:
1853 * Zero on success, AC_ERR_* mask on failure
1854 */
1855 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1856 {
1857 struct ata_taskfile tf;
1858
1859 ata_tf_init(dev, &tf);
1860
1861 tf.command = cmd;
1862 tf.flags |= ATA_TFLAG_DEVICE;
1863 tf.protocol = ATA_PROT_NODATA;
1864
1865 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1866 }
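/*
 * Example (hypothetical caller): issue a cache flush as a simple
 * no-data command and check the AC_ERR_* mask, similar to how
 * libata's cache-flush path uses this helper.
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *
 *	if (err_mask)
 *		... handle the failure ...
 */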
1867
1868 /**
1869 * ata_pio_need_iordy - check if iordy needed
1870 * @adev: ATA device
1871 *
1872 * Check if the current speed of the device requires IORDY. Used
1873 * by various controllers for chip configuration.
1874 */
1875
1876 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1877 {
1878 /* Controller doesn't support IORDY. Probably a pointless check
1879 as the caller should know this */
1880 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1881 return 0;
1882 	/* For PIO3 and higher it is mandatory */
1883 if (adev->pio_mode > XFER_PIO_2)
1884 return 1;
1885 /* We turn it on when possible */
1886 if (ata_id_has_iordy(adev->id))
1887 return 1;
1888 return 0;
1889 }
1890
1891 /**
1892 * ata_pio_mask_no_iordy - Return the non IORDY mask
1893 * @adev: ATA device
1894 *
1895  *	Compute the PIO modes usable without IORDY, returned as a
1896  *	PIO mode mask.
1897 */
1898
1899 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1900 {
1901 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1902 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1903 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1904 /* Is the speed faster than the drive allows non IORDY ? */
1905 if (pio) {
1906 /* This is cycle times not frequency - watch the logic! */
1907 if (pio > 240) /* PIO2 is 240nS per cycle */
1908 return 3 << ATA_SHIFT_PIO;
1909 return 7 << ATA_SHIFT_PIO;
1910 }
1911 }
1912 return 3 << ATA_SHIFT_PIO;
1913 }
1914
1915 /**
1916 * ata_dev_read_id - Read ID data from the specified device
1917 * @dev: target device
1918 * @p_class: pointer to class of the target device (may be changed)
1919 * @flags: ATA_READID_* flags
1920 * @id: buffer to read IDENTIFY data into
1921 *
1922 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1923 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1924 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1925 * for pre-ATA4 drives.
1926 *
1927 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1928 * now we abort if we hit that case.
1929 *
1930 * LOCKING:
1931 * Kernel thread context (may sleep)
1932 *
1933 * RETURNS:
1934 * 0 on success, -errno otherwise.
1935 */
1936 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1937 unsigned int flags, u16 *id)
1938 {
1939 struct ata_port *ap = dev->link->ap;
1940 unsigned int class = *p_class;
1941 struct ata_taskfile tf;
1942 unsigned int err_mask = 0;
1943 const char *reason;
1944 int may_fallback = 1, tried_spinup = 0;
1945 int rc;
1946
1947 if (ata_msg_ctl(ap))
1948 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1949
1950 retry:
1951 ata_tf_init(dev, &tf);
1952
1953 switch (class) {
1954 case ATA_DEV_ATA:
1955 tf.command = ATA_CMD_ID_ATA;
1956 break;
1957 case ATA_DEV_ATAPI:
1958 tf.command = ATA_CMD_ID_ATAPI;
1959 break;
1960 default:
1961 rc = -ENODEV;
1962 reason = "unsupported class";
1963 goto err_out;
1964 }
1965
1966 tf.protocol = ATA_PROT_PIO;
1967
1968 /* Some devices choke if TF registers contain garbage. Make
1969 * sure those are properly initialized.
1970 */
1971 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1972
1973 /* Device presence detection is unreliable on some
1974 * controllers. Always poll IDENTIFY if available.
1975 */
1976 tf.flags |= ATA_TFLAG_POLLING;
1977
1978 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1979 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1980 if (err_mask) {
1981 if (err_mask & AC_ERR_NODEV_HINT) {
1982 ata_dev_printk(dev, KERN_DEBUG,
1983 "NODEV after polling detection\n");
1984 return -ENOENT;
1985 }
1986
1987 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1988 /* Device or controller might have reported
1989 * the wrong device class. Give a shot at the
1990 * other IDENTIFY if the current one is
1991 * aborted by the device.
1992 */
1993 if (may_fallback) {
1994 may_fallback = 0;
1995
1996 if (class == ATA_DEV_ATA)
1997 class = ATA_DEV_ATAPI;
1998 else
1999 class = ATA_DEV_ATA;
2000 goto retry;
2001 }
2002
2003 /* Control reaches here iff the device aborted
2004 * both flavors of IDENTIFYs, which happens
2005 * sometimes with phantom devices.
2006 */
2007 ata_dev_printk(dev, KERN_DEBUG,
2008 "both IDENTIFYs aborted, assuming NODEV\n");
2009 return -ENOENT;
2010 }
2011
2012 rc = -EIO;
2013 reason = "I/O error";
2014 goto err_out;
2015 }
2016
2017 /* Falling back doesn't make sense if ID data was read
2018 * successfully at least once.
2019 */
2020 may_fallback = 0;
2021
2022 swap_buf_le16(id, ATA_ID_WORDS);
2023
2024 /* sanity check */
2025 rc = -EINVAL;
2026 reason = "device reports invalid type";
2027
2028 if (class == ATA_DEV_ATA) {
2029 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2030 goto err_out;
2031 } else {
2032 if (ata_id_is_ata(id))
2033 goto err_out;
2034 }
2035
2036 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2037 tried_spinup = 1;
2038 /*
2039 * Drive powered-up in standby mode, and requires a specific
2040 * SET_FEATURES spin-up subcommand before it will accept
2041 * anything other than the original IDENTIFY command.
2042 */
2043 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2044 if (err_mask && id[2] != 0x738c) {
2045 rc = -EIO;
2046 reason = "SPINUP failed";
2047 goto err_out;
2048 }
2049 /*
2050 * If the drive initially returned incomplete IDENTIFY info,
2051 * we now must reissue the IDENTIFY command.
2052 */
2053 if (id[2] == 0x37c8)
2054 goto retry;
2055 }
2056
2057 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2058 /*
2059 * The exact sequence expected by certain pre-ATA4 drives is:
2060 * SRST RESET
2061 * IDENTIFY (optional in early ATA)
2062 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2063 * anything else..
2064 * Some drives were very specific about that exact sequence.
2065 *
2066 * Note that ATA4 says lba is mandatory so the second check
2067 * should never trigger.
2068 */
2069 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2070 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2071 if (err_mask) {
2072 rc = -EIO;
2073 reason = "INIT_DEV_PARAMS failed";
2074 goto err_out;
2075 }
2076
2077 /* current CHS translation info (id[53-58]) might be
2078 * changed. reread the identify device info.
2079 */
2080 flags &= ~ATA_READID_POSTRESET;
2081 goto retry;
2082 }
2083 }
2084
2085 *p_class = class;
2086
2087 return 0;
2088
2089 err_out:
2090 if (ata_msg_warn(ap))
2091 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2092 "(%s, err_mask=0x%x)\n", reason, err_mask);
2093 return rc;
2094 }
2095
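/*
 * Illustrative sketch (hypothetical caller; the real users are
 * ata_bus_probe() and ata_dev_reread_id() below): read IDENTIFY
 * data and pull the product string out of it.
 *
 *	u16 id[ATA_ID_WORDS];
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *	unsigned int class = ATA_DEV_ATA;
 *
 *	if (ata_dev_read_id(dev, &class, 0, id) == 0) {
 *		// id[] is already byte-swapped to host order
 *		ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
 *	}
 */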
2096 static inline u8 ata_dev_knobble(struct ata_device *dev)
2097 {
2098 struct ata_port *ap = dev->link->ap;
2099 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2100 }
2101
2102 static void ata_dev_config_ncq(struct ata_device *dev,
2103 char *desc, size_t desc_sz)
2104 {
2105 struct ata_port *ap = dev->link->ap;
2106 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2107
2108 if (!ata_id_has_ncq(dev->id)) {
2109 desc[0] = '\0';
2110 return;
2111 }
2112 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2113 snprintf(desc, desc_sz, "NCQ (not used)");
2114 return;
2115 }
2116 if (ap->flags & ATA_FLAG_NCQ) {
2117 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2118 dev->flags |= ATA_DFLAG_NCQ;
2119 }
2120
2121 if (hdepth >= ddepth)
2122 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2123 else
2124 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2125 }
2126
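/*
 * Worked example for ata_dev_config_ncq() above, assuming the usual
 * ATA_MAX_QUEUE of 32: the host side is capped at 31 tags, so a
 * drive reporting a 32-deep queue in its IDENTIFY data logs as
 * "NCQ (depth 31/32)", while a drive whose own depth is the smaller
 * of the two logs the single-value form "NCQ (depth <n>)".
 */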
2127 /**
2128 * ata_dev_configure - Configure the specified ATA/ATAPI device
2129 * @dev: Target device to configure
2130 *
2131 * Configure @dev according to @dev->id. Generic and low-level
2132 * driver specific fixups are also applied.
2133 *
2134 * LOCKING:
2135 * Kernel thread context (may sleep)
2136 *
2137 * RETURNS:
2138 * 0 on success, -errno otherwise
2139 */
2140 int ata_dev_configure(struct ata_device *dev)
2141 {
2142 struct ata_port *ap = dev->link->ap;
2143 struct ata_eh_context *ehc = &dev->link->eh_context;
2144 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2145 const u16 *id = dev->id;
2146 unsigned long xfer_mask;
2147 char revbuf[7]; /* XYZ-99\0 */
2148 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2149 char modelbuf[ATA_ID_PROD_LEN+1];
2150 int rc;
2151
2152 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2153 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2154 __func__);
2155 return 0;
2156 }
2157
2158 if (ata_msg_probe(ap))
2159 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2160
2161 /* set horkage */
2162 dev->horkage |= ata_dev_blacklisted(dev);
2163 ata_force_horkage(dev);
2164
2165 /* let ACPI work its magic */
2166 rc = ata_acpi_on_devcfg(dev);
2167 if (rc)
2168 return rc;
2169
2170 /* massage HPA, do it early as it might change IDENTIFY data */
2171 rc = ata_hpa_resize(dev);
2172 if (rc)
2173 return rc;
2174
2175 /* print device capabilities */
2176 if (ata_msg_probe(ap))
2177 ata_dev_printk(dev, KERN_DEBUG,
2178 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2179 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2180 __func__,
2181 id[49], id[82], id[83], id[84],
2182 id[85], id[86], id[87], id[88]);
2183
2184 /* initialize to-be-configured parameters */
2185 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2186 dev->max_sectors = 0;
2187 dev->cdb_len = 0;
2188 dev->n_sectors = 0;
2189 dev->cylinders = 0;
2190 dev->heads = 0;
2191 dev->sectors = 0;
2192
2193 /*
2194 * common ATA, ATAPI feature tests
2195 */
2196
2197 /* find max transfer mode; for printk only */
2198 xfer_mask = ata_id_xfermask(id);
2199
2200 if (ata_msg_probe(ap))
2201 ata_dump_id(id);
2202
2203 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2204 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2205 sizeof(fwrevbuf));
2206
2207 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2208 sizeof(modelbuf));
2209
2210 /* ATA-specific feature tests */
2211 if (dev->class == ATA_DEV_ATA) {
2212 if (ata_id_is_cfa(id)) {
2213 if (id[162] & 1) /* CPRM may make this media unusable */
2214 ata_dev_printk(dev, KERN_WARNING,
2215 "supports DRM functions and may "
2216 "not be fully accessable.\n");
2217 snprintf(revbuf, 7, "CFA");
2218 } else {
2219 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2220 /* Warn the user if the device has TPM extensions */
2221 if (ata_id_has_tpm(id))
2222 ata_dev_printk(dev, KERN_WARNING,
2223 "supports DRM functions and may "
2224 "not be fully accessable.\n");
2225 }
2226
2227 dev->n_sectors = ata_id_n_sectors(id);
2228
2229 if (dev->id[59] & 0x100)
2230 dev->multi_count = dev->id[59] & 0xff;
2231
2232 if (ata_id_has_lba(id)) {
2233 const char *lba_desc;
2234 char ncq_desc[20];
2235
2236 lba_desc = "LBA";
2237 dev->flags |= ATA_DFLAG_LBA;
2238 if (ata_id_has_lba48(id)) {
2239 dev->flags |= ATA_DFLAG_LBA48;
2240 lba_desc = "LBA48";
2241
2242 if (dev->n_sectors >= (1UL << 28) &&
2243 ata_id_has_flush_ext(id))
2244 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2245 }
2246
2247 /* config NCQ */
2248 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2249
2250 /* print device info to dmesg */
2251 if (ata_msg_drv(ap) && print_info) {
2252 ata_dev_printk(dev, KERN_INFO,
2253 "%s: %s, %s, max %s\n",
2254 revbuf, modelbuf, fwrevbuf,
2255 ata_mode_string(xfer_mask));
2256 ata_dev_printk(dev, KERN_INFO,
2257 "%Lu sectors, multi %u: %s %s\n",
2258 (unsigned long long)dev->n_sectors,
2259 dev->multi_count, lba_desc, ncq_desc);
2260 }
2261 } else {
2262 /* CHS */
2263
2264 /* Default translation */
2265 dev->cylinders = id[1];
2266 dev->heads = id[3];
2267 dev->sectors = id[6];
2268
2269 if (ata_id_current_chs_valid(id)) {
2270 /* Current CHS translation is valid. */
2271 dev->cylinders = id[54];
2272 dev->heads = id[55];
2273 dev->sectors = id[56];
2274 }
2275
2276 /* print device info to dmesg */
2277 if (ata_msg_drv(ap) && print_info) {
2278 ata_dev_printk(dev, KERN_INFO,
2279 "%s: %s, %s, max %s\n",
2280 revbuf, modelbuf, fwrevbuf,
2281 ata_mode_string(xfer_mask));
2282 ata_dev_printk(dev, KERN_INFO,
2283 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2284 (unsigned long long)dev->n_sectors,
2285 dev->multi_count, dev->cylinders,
2286 dev->heads, dev->sectors);
2287 }
2288 }
2289
2290 dev->cdb_len = 16;
2291 }
2292
2293 /* ATAPI-specific feature tests */
2294 else if (dev->class == ATA_DEV_ATAPI) {
2295 const char *cdb_intr_string = "";
2296 const char *atapi_an_string = "";
2297 const char *dma_dir_string = "";
2298 u32 sntf;
2299
2300 rc = atapi_cdb_len(id);
2301 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2302 if (ata_msg_warn(ap))
2303 ata_dev_printk(dev, KERN_WARNING,
2304 "unsupported CDB len\n");
2305 rc = -EINVAL;
2306 goto err_out_nosup;
2307 }
2308 dev->cdb_len = (unsigned int) rc;
2309
2310 /* Enable ATAPI AN if both the host and device have
2311 * the support. If PMP is attached, SNTF is required
2312 * to enable ATAPI AN to discern between PHY status
2313 * changed notifications and ATAPI ANs.
2314 */
2315 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2316 (!ap->nr_pmp_links ||
2317 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2318 unsigned int err_mask;
2319
2320 /* issue SET feature command to turn this on */
2321 err_mask = ata_dev_set_feature(dev,
2322 SETFEATURES_SATA_ENABLE, SATA_AN);
2323 if (err_mask)
2324 ata_dev_printk(dev, KERN_ERR,
2325 "failed to enable ATAPI AN "
2326 "(err_mask=0x%x)\n", err_mask);
2327 else {
2328 dev->flags |= ATA_DFLAG_AN;
2329 atapi_an_string = ", ATAPI AN";
2330 }
2331 }
2332
2333 if (ata_id_cdb_intr(dev->id)) {
2334 dev->flags |= ATA_DFLAG_CDB_INTR;
2335 cdb_intr_string = ", CDB intr";
2336 }
2337
2338 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2339 dev->flags |= ATA_DFLAG_DMADIR;
2340 dma_dir_string = ", DMADIR";
2341 }
2342
2343 /* print device info to dmesg */
2344 if (ata_msg_drv(ap) && print_info)
2345 ata_dev_printk(dev, KERN_INFO,
2346 "ATAPI: %s, %s, max %s%s%s%s\n",
2347 modelbuf, fwrevbuf,
2348 ata_mode_string(xfer_mask),
2349 cdb_intr_string, atapi_an_string,
2350 dma_dir_string);
2351 }
2352
2353 /* determine max_sectors */
2354 dev->max_sectors = ATA_MAX_SECTORS;
2355 if (dev->flags & ATA_DFLAG_LBA48)
2356 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2357
2358 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2359 if (ata_id_has_hipm(dev->id))
2360 dev->flags |= ATA_DFLAG_HIPM;
2361 if (ata_id_has_dipm(dev->id))
2362 dev->flags |= ATA_DFLAG_DIPM;
2363 }
2364
2365 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2366 200 sectors */
2367 if (ata_dev_knobble(dev)) {
2368 if (ata_msg_drv(ap) && print_info)
2369 ata_dev_printk(dev, KERN_INFO,
2370 "applying bridge limits\n");
2371 dev->udma_mask &= ATA_UDMA5;
2372 dev->max_sectors = ATA_MAX_SECTORS;
2373 }
2374
2375 if ((dev->class == ATA_DEV_ATAPI) &&
2376 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2377 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2378 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2379 }
2380
2381 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2382 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2383 dev->max_sectors);
2384
2385 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2386 dev->horkage |= ATA_HORKAGE_IPM;
2387
2388 /* reset link pm_policy for this port to no pm */
2389 ap->pm_policy = MAX_PERFORMANCE;
2390 }
2391
2392 if (ap->ops->dev_config)
2393 ap->ops->dev_config(dev);
2394
2395 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2396 /* Let the user know. We don't want to disallow opens for
2397 rescue purposes, or in case the vendor is just a blithering
2398 idiot. Do this after the dev_config call as some controllers
2399 with buggy firmware may want to avoid reporting false device
2400 bugs */
2401
2402 if (print_info) {
2403 ata_dev_printk(dev, KERN_WARNING,
2404 "Drive reports diagnostics failure. This may indicate a drive\n");
2405 ata_dev_printk(dev, KERN_WARNING,
2406 "fault or invalid emulation. Contact drive vendor for information.\n");
2407 }
2408 }
2409
2410 return 0;
2411
2412 err_out_nosup:
2413 if (ata_msg_probe(ap))
2414 ata_dev_printk(dev, KERN_DEBUG,
2415 "%s: EXIT, err\n", __func__);
2416 return rc;
2417 }
2418
2419 /**
2420 * ata_cable_40wire - return 40 wire cable type
2421 * @ap: port
2422 *
2423 * Helper method for drivers which want to hardwire 40 wire cable
2424 * detection.
2425 */
2426
2427 int ata_cable_40wire(struct ata_port *ap)
2428 {
2429 return ATA_CBL_PATA40;
2430 }
2431
2432 /**
2433 * ata_cable_80wire - return 80 wire cable type
2434 * @ap: port
2435 *
2436 * Helper method for drivers which want to hardwire 80 wire cable
2437 * detection.
2438 */
2439
2440 int ata_cable_80wire(struct ata_port *ap)
2441 {
2442 return ATA_CBL_PATA80;
2443 }
2444
2445 /**
2446 * ata_cable_unknown - return unknown PATA cable.
2447 * @ap: port
2448 *
2449 * Helper method for drivers which have no PATA cable detection.
2450 */
2451
2452 int ata_cable_unknown(struct ata_port *ap)
2453 {
2454 return ATA_CBL_PATA_UNK;
2455 }
2456
2457 /**
2458 * ata_cable_ignore - return ignored PATA cable.
2459 * @ap: port
2460 *
2461 * Helper method for drivers which don't use cable type to limit
2462 * transfer mode.
2463 */
2464 int ata_cable_ignore(struct ata_port *ap)
2465 {
2466 return ATA_CBL_PATA_IGN;
2467 }
2468
2469 /**
2470 * ata_cable_sata - return SATA cable type
2471 * @ap: port
2472 *
2473 * Helper method for drivers which have SATA cables
2474 */
2475
2476 int ata_cable_sata(struct ata_port *ap)
2477 {
2478 return ATA_CBL_SATA;
2479 }
2480
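/*
 * Sketch of how a low-level driver typically wires up the cable
 * helpers above (the ops structure here is hypothetical): hardwire
 * the answer through the ->cable_detect hook instead of probing.
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		...
 *		.cable_detect	= ata_cable_40wire,	// always 40-wire
 *	};
 *
 * ata_bus_probe() below then stores the result in ap->cbl.
 */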
2481 /**
2482 * ata_bus_probe - Reset and probe ATA bus
2483 * @ap: Bus to probe
2484 *
2485 * Master ATA bus probing function. Initiates a hardware-dependent
2486 * bus reset, then attempts to identify any devices found on
2487 * the bus.
2488 *
2489 * LOCKING:
2490 * PCI/etc. bus probe sem.
2491 *
2492 * RETURNS:
2493 * Zero on success, negative errno otherwise.
2494 */
2495
2496 int ata_bus_probe(struct ata_port *ap)
2497 {
2498 unsigned int classes[ATA_MAX_DEVICES];
2499 int tries[ATA_MAX_DEVICES];
2500 int rc;
2501 struct ata_device *dev;
2502
2503 ata_port_probe(ap);
2504
2505 ata_link_for_each_dev(dev, &ap->link)
2506 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2507
2508 retry:
2509 ata_link_for_each_dev(dev, &ap->link) {
2510 /* If we issue an SRST then an ATA drive (not ATAPI)
2511 * may change configuration and be in PIO0 timing. If
2512 * we do a hard reset (or are coming from power on)
2513 * this is true for ATA or ATAPI. Until we've set a
2514 * suitable controller mode we should not touch the
2515 * bus as we may be talking too fast.
2516 */
2517 dev->pio_mode = XFER_PIO_0;
2518
2519 /* If the controller has a pio mode setup function
2520 * then use it to set the chipset to rights. Don't
2521 * touch the DMA setup as that will be dealt with when
2522 * configuring devices.
2523 */
2524 if (ap->ops->set_piomode)
2525 ap->ops->set_piomode(ap, dev);
2526 }
2527
2528 /* reset and determine device classes */
2529 ap->ops->phy_reset(ap);
2530
2531 ata_link_for_each_dev(dev, &ap->link) {
2532 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2533 dev->class != ATA_DEV_UNKNOWN)
2534 classes[dev->devno] = dev->class;
2535 else
2536 classes[dev->devno] = ATA_DEV_NONE;
2537
2538 dev->class = ATA_DEV_UNKNOWN;
2539 }
2540
2541 ata_port_probe(ap);
2542
2543 /* read IDENTIFY page and configure devices. We have to do the identify
2544 specific sequence bass-ackwards so that PDIAG- is released by
2545 the slave device */
2546
2547 ata_link_for_each_dev_reverse(dev, &ap->link) {
2548 if (tries[dev->devno])
2549 dev->class = classes[dev->devno];
2550
2551 if (!ata_dev_enabled(dev))
2552 continue;
2553
2554 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2555 dev->id);
2556 if (rc)
2557 goto fail;
2558 }
2559
2560 /* Now ask for the cable type as PDIAG- should have been released */
2561 if (ap->ops->cable_detect)
2562 ap->cbl = ap->ops->cable_detect(ap);
2563
2564 /* We may have SATA bridge glue hiding here irrespective of the
2565 reported cable types and sensed types */
2566 ata_link_for_each_dev(dev, &ap->link) {
2567 if (!ata_dev_enabled(dev))
2568 continue;
2569 /* SATA drives indicate we have a bridge. We don't know which
2570 end of the link the bridge is at, which is a problem */
2571 if (ata_id_is_sata(dev->id))
2572 ap->cbl = ATA_CBL_SATA;
2573 }
2574
2575 /* After the identify sequence we can now set up the devices. We do
2576 this in the normal order so that the user doesn't get confused */
2577
2578 ata_link_for_each_dev(dev, &ap->link) {
2579 if (!ata_dev_enabled(dev))
2580 continue;
2581
2582 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2583 rc = ata_dev_configure(dev);
2584 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2585 if (rc)
2586 goto fail;
2587 }
2588
2589 /* configure transfer mode */
2590 rc = ata_set_mode(&ap->link, &dev);
2591 if (rc)
2592 goto fail;
2593
2594 ata_link_for_each_dev(dev, &ap->link)
2595 if (ata_dev_enabled(dev))
2596 return 0;
2597
2598 /* no device present, disable port */
2599 ata_port_disable(ap);
2600 return -ENODEV;
2601
2602 fail:
2603 tries[dev->devno]--;
2604
2605 switch (rc) {
2606 case -EINVAL:
2607 /* eeek, something went very wrong, give up */
2608 tries[dev->devno] = 0;
2609 break;
2610
2611 case -ENODEV:
2612 /* give it just one more chance */
2613 tries[dev->devno] = min(tries[dev->devno], 1);
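/* fall through: -ENODEV also gets the last-chance slowdown */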
2614 case -EIO:
2615 if (tries[dev->devno] == 1) {
2616 /* This is the last chance, better to slow
2617 * down than lose it.
2618 */
2619 sata_down_spd_limit(&ap->link);
2620 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2621 }
2622 }
2623
2624 if (!tries[dev->devno])
2625 ata_dev_disable(dev);
2626
2627 goto retry;
2628 }
2629
2630 /**
2631 * ata_port_probe - Mark port as enabled
2632 * @ap: Port for which we indicate enablement
2633 *
2634 * Modify @ap data structure such that the system
2635 * thinks that the entire port is enabled.
2636 *
2637 * LOCKING: host lock, or some other form of
2638 * serialization.
2639 */
2640
2641 void ata_port_probe(struct ata_port *ap)
2642 {
2643 ap->flags &= ~ATA_FLAG_DISABLED;
2644 }
2645
2646 /**
2647 * sata_print_link_status - Print SATA link status
2648 * @link: SATA link to printk link status about
2649 *
2650 * This function prints link speed and status of a SATA link.
2651 *
2652 * LOCKING:
2653 * None.
2654 */
2655 void sata_print_link_status(struct ata_link *link)
2656 {
2657 u32 sstatus, scontrol, tmp;
2658
2659 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2660 return;
2661 sata_scr_read(link, SCR_CONTROL, &scontrol);
2662
2663 if (ata_link_online(link)) {
2664 tmp = (sstatus >> 4) & 0xf;
2665 ata_link_printk(link, KERN_INFO,
2666 "SATA link up %s (SStatus %X SControl %X)\n",
2667 sata_spd_string(tmp), sstatus, scontrol);
2668 } else {
2669 ata_link_printk(link, KERN_INFO,
2670 "SATA link down (SStatus %X SControl %X)\n",
2671 sstatus, scontrol);
2672 }
2673 }
2674
2675 /**
2676 * ata_dev_pair - return other device on cable
2677 * @adev: device
2678 *
2679 * Obtain the other device on the same cable, or if none is
2680 * present NULL is returned
2681 */
2682
2683 struct ata_device *ata_dev_pair(struct ata_device *adev)
2684 {
2685 struct ata_link *link = adev->link;
2686 struct ata_device *pair = &link->device[1 - adev->devno];
2687 if (!ata_dev_enabled(pair))
2688 return NULL;
2689 return pair;
2690 }
2691
2692 /**
2693 * ata_port_disable - Disable port.
2694 * @ap: Port to be disabled.
2695 *
2696 * Modify @ap data structure such that the system
2697 * thinks that the entire port is disabled, and should
2698 * never attempt to probe or communicate with devices
2699 * on this port.
2700 *
2701 * LOCKING: host lock, or some other form of
2702 * serialization.
2703 */
2704
2705 void ata_port_disable(struct ata_port *ap)
2706 {
2707 ap->link.device[0].class = ATA_DEV_NONE;
2708 ap->link.device[1].class = ATA_DEV_NONE;
2709 ap->flags |= ATA_FLAG_DISABLED;
2710 }
2711
2712 /**
2713 * sata_down_spd_limit - adjust SATA spd limit downward
2714 * @link: Link to adjust SATA spd limit for
2715 *
2716 * Adjust SATA spd limit of @link downward. Note that this
2717 * function only adjusts the limit. The change must be applied
2718 * using sata_set_spd().
2719 *
2720 * LOCKING:
2721 * Inherited from caller.
2722 *
2723 * RETURNS:
2724 * 0 on success, negative errno on failure
2725 */
2726 int sata_down_spd_limit(struct ata_link *link)
2727 {
2728 u32 sstatus, spd, mask;
2729 int rc, highbit;
2730
2731 if (!sata_scr_valid(link))
2732 return -EOPNOTSUPP;
2733
2734 /* If SCR can be read, use it to determine the current SPD.
2735 * If not, use cached value in link->sata_spd.
2736 */
2737 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2738 if (rc == 0)
2739 spd = (sstatus >> 4) & 0xf;
2740 else
2741 spd = link->sata_spd;
2742
2743 mask = link->sata_spd_limit;
2744 if (mask <= 1)
2745 return -EINVAL;
2746
2747 /* unconditionally mask off the highest bit */
2748 highbit = fls(mask) - 1;
2749 mask &= ~(1 << highbit);
2750
2751 /* Mask off all speeds higher than or equal to the current
2752 * one. Force 1.5Gbps if current SPD is not available.
2753 */
2754 if (spd > 1)
2755 mask &= (1 << (spd - 1)) - 1;
2756 else
2757 mask &= 1;
2758
2759 /* were we already at the bottom? */
2760 if (!mask)
2761 return -EINVAL;
2762
2763 link->sata_spd_limit = mask;
2764
2765 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2766 sata_spd_string(fls(mask)));
2767
2768 return 0;
2769 }
2770
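/*
 * Worked example for sata_down_spd_limit() above: sata_spd_limit is
 * a bitmask with bit 0 = 1.5Gbps and bit 1 = 3.0Gbps.  Starting from
 * mask = 0x3 with the link running at 3.0Gbps (spd == 2):
 *
 *	highbit = fls(0x3) - 1 = 1, so mask &= ~(1 << 1)  ->  0x1
 *	spd > 1, so mask &= (1 << (2 - 1)) - 1            ->  0x1
 *
 * leaving only 1.5Gbps.  A further call would find mask <= 1 and
 * return -EINVAL: the limit is already at the bottom.
 */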
2771 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2772 {
2773 struct ata_link *host_link = &link->ap->link;
2774 u32 limit, target, spd;
2775
2776 limit = link->sata_spd_limit;
2777
2778 /* Don't configure downstream link faster than upstream link.
2779 * It doesn't speed up anything and some PMPs choke on such
2780 * configuration.
2781 */
2782 if (!ata_is_host_link(link) && host_link->sata_spd)
2783 limit &= (1 << host_link->sata_spd) - 1;
2784
2785 if (limit == UINT_MAX)
2786 target = 0;
2787 else
2788 target = fls(limit);
2789
2790 spd = (*scontrol >> 4) & 0xf;
2791 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2792
2793 return spd != target;
2794 }
2795
2796 /**
2797 * sata_set_spd_needed - is SATA spd configuration needed
2798 * @link: Link in question
2799 *
2800 * Test whether the spd limit in SControl matches
2801 * @link->sata_spd_limit. This function is used to determine
2802 * whether hardreset is necessary to apply SATA spd
2803 * configuration.
2804 *
2805 * LOCKING:
2806 * Inherited from caller.
2807 *
2808 * RETURNS:
2809 * 1 if SATA spd configuration is needed, 0 otherwise.
2810 */
2811 int sata_set_spd_needed(struct ata_link *link)
2812 {
2813 u32 scontrol;
2814
2815 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2816 return 1;
2817
2818 return __sata_set_spd_needed(link, &scontrol);
2819 }
2820
2821 /**
2822 * sata_set_spd - set SATA spd according to spd limit
2823 * @link: Link to set SATA spd for
2824 *
2825 * Set SATA spd of @link according to sata_spd_limit.
2826 *
2827 * LOCKING:
2828 * Inherited from caller.
2829 *
2830 * RETURNS:
2831 * 0 if spd doesn't need to be changed, 1 if spd has been
2832 * changed. Negative errno if SCR registers are inaccessible.
2833 */
2834 int sata_set_spd(struct ata_link *link)
2835 {
2836 u32 scontrol;
2837 int rc;
2838
2839 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2840 return rc;
2841
2842 if (!__sata_set_spd_needed(link, &scontrol))
2843 return 0;
2844
2845 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2846 return rc;
2847
2848 return 1;
2849 }
2850
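/*
 * Typical error-handler usage of sata_set_spd() (sketch with
 * hypothetical surroundings): a return of 1 means SControl was
 * rewritten, so a hardreset is needed to renegotiate the link at
 * the new speed.
 *
 *	rc = sata_set_spd(link);
 *	if (rc < 0)
 *		return rc;		// SCR access failed
 *	if (rc)
 *		ehc->i.action |= ATA_EH_HARDRESET;
 */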
2851 /*
2852 * This mode timing computation functionality is ported over from
2853 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2854 */
2855 /*
2856 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2857 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2858 * for UDMA6, which is currently supported only by Maxtor drives.
2859 *
2860 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2861 */
2862
2863 static const struct ata_timing ata_timing[] = {
2864 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2865 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2866 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2867 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2868 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2869 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2870 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2871 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2872
2873 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2874 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2875 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2876
2877 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2878 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2879 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2880 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2881 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2882
2883 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2884 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2885 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2886 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2887 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2888 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2889 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2890 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2891
2892 { 0xFF }
2893 };
2894
2895 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2896 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2897
2898 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2899 {
2900 q->setup = EZ(t->setup * 1000, T);
2901 q->act8b = EZ(t->act8b * 1000, T);
2902 q->rec8b = EZ(t->rec8b * 1000, T);
2903 q->cyc8b = EZ(t->cyc8b * 1000, T);
2904 q->active = EZ(t->active * 1000, T);
2905 q->recover = EZ(t->recover * 1000, T);
2906 q->cycle = EZ(t->cycle * 1000, T);
2907 q->udma = EZ(t->udma * 1000, UT);
2908 }
2909
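/*
 * Worked example for the quantization above: EZ() is a round-up
 * division that maps 0 to 0.  The table entries are in nanoseconds,
 * hence the "* 1000"; callers pass the bus clock period T (and UT
 * for UDMA) in picoseconds.  For a 240ns PIO2 cycle on a 33MHz bus
 * (T ~= 30000ps):
 *
 *	q->cycle = EZ(240 * 1000, 30000) = 8 clocks
 *
 * and a 245ns cycle would round up to 9 clocks rather than truncate.
 */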
2910 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2911 struct ata_timing *m, unsigned int what)
2912 {
2913 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2914 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2915 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2916 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2917 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2918 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2919 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2920 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2921 }
2922
2923 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2924 {
2925 const struct ata_timing *t = ata_timing;
2926
2927 while (xfer_mode > t->mode)
2928 t++;
2929
2930 if (xfer_mode == t->mode)
2931 return t;
2932 return NULL;
2933 }
2934
2935 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2936 struct ata_timing *t, int T, int UT)
2937 {
2938 const struct ata_timing *s;
2939 struct ata_timing p;
2940
2941 /*
2942 * Find the mode.
2943 */
2944
2945 if (!(s = ata_timing_find_mode(speed)))
2946 return -EINVAL;
2947
2948 memcpy(t, s, sizeof(*s));
2949
2950 /*
2951 * If the drive is an EIDE drive, it can tell us it needs extended
2952 * PIO/MW_DMA cycle timing.
2953 */
2954
2955 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2956 memset(&p, 0, sizeof(p));
2957 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2958 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2959 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2960 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2961 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2962 }
2963 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2964 }
2965
2966 /*
2967 * Convert the timing to bus clock counts.
2968 */
2969
2970 ata_timing_quantize(t, t, T, UT);
2971
2972 /*
2973 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2974 * S.M.A.R.T. and some other commands. We have to ensure that the
2975 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2976 */
2977
2978 if (speed > XFER_PIO_6) {
2979 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2980 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2981 }
2982
2983 /*
2984 * Lengthen active & recovery time so that cycle time is correct.
2985 */
2986
2987 if (t->act8b + t->rec8b < t->cyc8b) {
2988 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2989 t->rec8b = t->cyc8b - t->act8b;
2990 }
2991
2992 if (t->active + t->recover < t->cycle) {
2993 t->active += (t->cycle - (t->active + t->recover)) / 2;
2994 t->recover = t->cycle - t->active;
2995 }
2996
2997 /* In a few cases quantisation may produce enough errors to
2998 leave t->cycle too low for the sum of active and recovery;
2999 if so, we must correct this */
3000 if (t->active + t->recover > t->cycle)
3001 t->cycle = t->active + t->recover;
3002
3003 return 0;
3004 }
3005
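/*
 * Sketch of a driver-side call of ata_timing_compute() (values here
 * are hypothetical): compute clock counts for MWDMA2 on a 33MHz
 * controller, merged with the drive's own EIDE minimums and with
 * the PIO command timing as described above.
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	// ~30000ps clock period
 *
 *	if (ata_timing_compute(adev, XFER_MW_DMA_2, &t, T, T) == 0) {
 *		// t.active, t.recover and t.cycle are now in clocks,
 *		// ready to be programmed into the controller
 *	}
 */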
3006 /**
3007 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3008 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3009 * @cycle: cycle duration in ns
3010 *
3011 * Return matching xfer mode for @cycle. The returned mode is of
3012 * the transfer type specified by @xfer_shift. If @cycle is too
3013 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3014 * than the fastest known mode, the fastest mode is returned.
3015 *
3016 * LOCKING:
3017 * None.
3018 *
3019 * RETURNS:
3020 * Matching xfer_mode, 0xff if no match found.
3021 */
3022 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3023 {
3024 u8 base_mode = 0xff, last_mode = 0xff;
3025 const struct ata_xfer_ent *ent;
3026 const struct ata_timing *t;
3027
3028 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3029 if (ent->shift == xfer_shift)
3030 base_mode = ent->base;
3031
3032 for (t = ata_timing_find_mode(base_mode);
3033 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3034 unsigned short this_cycle;
3035
3036 switch (xfer_shift) {
3037 case ATA_SHIFT_PIO:
3038 case ATA_SHIFT_MWDMA:
3039 this_cycle = t->cycle;
3040 break;
3041 case ATA_SHIFT_UDMA:
3042 this_cycle = t->udma;
3043 break;
3044 default:
3045 return 0xff;
3046 }
3047
3048 if (cycle > this_cycle)
3049 break;
3050
3051 last_mode = t->mode;
3052 }
3053
3054 return last_mode;
3055 }
3056
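/*
 * Worked example for ata_timing_cycle2mode(): on a bus that can
 * sustain 25ns UDMA cycles, the walk over the table above (120, 80,
 * 60, 45, 30, 20, 15ns) stops before UDMA5's 20ns entry, so
 *
 *	ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25) == XFER_UDMA_4
 */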
3057 /**
3058 * ata_down_xfermask_limit - adjust dev xfer masks downward
3059 * @dev: Device to adjust xfer masks
3060 * @sel: ATA_DNXFER_* selector
3061 *
3062 * Adjust xfer masks of @dev downward. Note that this function
3063 * does not apply the change. Invoking ata_set_mode() afterwards
3064 * will apply the limit.
3065 *
3066 * LOCKING:
3067 * Inherited from caller.
3068 *
3069 * RETURNS:
3070 * 0 on success, negative errno on failure
3071 */
3072 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3073 {
3074 char buf[32];
3075 unsigned long orig_mask, xfer_mask;
3076 unsigned long pio_mask, mwdma_mask, udma_mask;
3077 int quiet, highbit;
3078
3079 quiet = !!(sel & ATA_DNXFER_QUIET);
3080 sel &= ~ATA_DNXFER_QUIET;
3081
3082 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3083 dev->mwdma_mask,
3084 dev->udma_mask);
3085 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3086
3087 switch (sel) {
3088 case ATA_DNXFER_PIO:
3089 highbit = fls(pio_mask) - 1;
3090 pio_mask &= ~(1 << highbit);
3091 break;
3092
3093 case ATA_DNXFER_DMA:
3094 if (udma_mask) {
3095 highbit = fls(udma_mask) - 1;
3096 udma_mask &= ~(1 << highbit);
3097 if (!udma_mask)
3098 return -ENOENT;
3099 } else if (mwdma_mask) {
3100 highbit = fls(mwdma_mask) - 1;
3101 mwdma_mask &= ~(1 << highbit);
3102 if (!mwdma_mask)
3103 return -ENOENT;
3104 }
3105 break;
3106
3107 case ATA_DNXFER_40C:
3108 udma_mask &= ATA_UDMA_MASK_40C;
3109 break;
3110
3111 case ATA_DNXFER_FORCE_PIO0:
3112 pio_mask &= 1;
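/* fall through: FORCE_PIO0 also clears the DMA masks */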
3113 case ATA_DNXFER_FORCE_PIO:
3114 mwdma_mask = 0;
3115 udma_mask = 0;
3116 break;
3117
3118 default:
3119 BUG();
3120 }
3121
3122 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3123
3124 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3125 return -ENOENT;
3126
3127 if (!quiet) {
3128 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3129 snprintf(buf, sizeof(buf), "%s:%s",
3130 ata_mode_string(xfer_mask),
3131 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3132 else
3133 snprintf(buf, sizeof(buf), "%s",
3134 ata_mode_string(xfer_mask));
3135
3136 ata_dev_printk(dev, KERN_WARNING,
3137 "limiting speed to %s\n", buf);
3138 }
3139
3140 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3141 &dev->udma_mask);
3142
3143 return 0;
3144 }
3145
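/*
 * Example for ata_down_xfermask_limit() above: ATA_DNXFER_PIO clears
 * only the highest remaining PIO bit, so a device at PIO0-4
 * (pio_mask 0x1f) drops to PIO0-3 (0x0f).  Repeated calls walk down
 * one mode per call; once no PIO mode is left, the function returns
 * -ENOENT and error handling has run out of options.
 */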
3146 static int ata_dev_set_mode(struct ata_device *dev)
3147 {
3148 struct ata_eh_context *ehc = &dev->link->eh_context;
3149 const char *dev_err_whine = "";
3150 int ign_dev_err = 0;
3151 unsigned int err_mask;
3152 int rc;
3153
3154 dev->flags &= ~ATA_DFLAG_PIO;
3155 if (dev->xfer_shift == ATA_SHIFT_PIO)
3156 dev->flags |= ATA_DFLAG_PIO;
3157
3158 err_mask = ata_dev_set_xfermode(dev);
3159
3160 if (err_mask & ~AC_ERR_DEV)
3161 goto fail;
3162
3163 /* revalidate */
3164 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3165 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3166 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3167 if (rc)
3168 return rc;
3169
3170 /* Old CFA may refuse this command, which is just fine */
3171 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3172 ign_dev_err = 1;
3173
3174 /* Some very old devices and some bad newer ones fail any kind of
3175 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3176 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3177 dev->pio_mode <= XFER_PIO_2)
3178 ign_dev_err = 1;
3179
3180 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3181 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3182 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3183 dev->dma_mode == XFER_MW_DMA_0 &&
3184 (dev->id[63] >> 8) & 1)
3185 ign_dev_err = 1;
3186
3187 /* if the device is actually configured correctly, ignore dev err */
3188 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3189 ign_dev_err = 1;
3190
3191 if (err_mask & AC_ERR_DEV) {
3192 if (!ign_dev_err)
3193 goto fail;
3194 else
3195 dev_err_whine = " (device error ignored)";
3196 }
3197
3198 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3199 dev->xfer_shift, (int)dev->xfer_mode);
3200
3201 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3202 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3203 dev_err_whine);
3204
3205 return 0;
3206
3207 fail:
3208 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3209 "(err_mask=0x%x)\n", err_mask);
3210 return -EIO;
3211 }
3212
3213 /**
3214 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3215 * @link: link on which timings will be programmed
3216 * @r_failed_dev: out parameter for failed device
3217 *
3218 * Standard implementation of the function used to tune and set
3219 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3220 * ata_dev_set_mode() fails, pointer to the failing device is
3221 * returned in @r_failed_dev.
3222 *
3223 * LOCKING:
3224 * PCI/etc. bus probe sem.
3225 *
3226 * RETURNS:
3227 * 0 on success, negative errno otherwise
3228 */
3229
3230 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3231 {
3232 struct ata_port *ap = link->ap;
3233 struct ata_device *dev;
3234 int rc = 0, used_dma = 0, found = 0;
3235
3236 /* step 1: calculate xfer_mask */
3237 ata_link_for_each_dev(dev, link) {
3238 unsigned long pio_mask, dma_mask;
3239 unsigned int mode_mask;
3240
3241 if (!ata_dev_enabled(dev))
3242 continue;
3243
3244 mode_mask = ATA_DMA_MASK_ATA;
3245 if (dev->class == ATA_DEV_ATAPI)
3246 mode_mask = ATA_DMA_MASK_ATAPI;
3247 else if (ata_id_is_cfa(dev->id))
3248 mode_mask = ATA_DMA_MASK_CFA;
3249
3250 ata_dev_xfermask(dev);
3251 ata_force_xfermask(dev);
3252
3253 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3255
3256 if (libata_dma_mask & mode_mask)
3257 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3258 else
3259 dma_mask = 0;
3260
3261 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3262 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3263
3264 found = 1;
3265 if (dev->dma_mode != 0xff)
3266 used_dma = 1;
3267 }
3268 if (!found)
3269 goto out;
3270
3271 /* step 2: always set host PIO timings */
3272 ata_link_for_each_dev(dev, link) {
3273 if (!ata_dev_enabled(dev))
3274 continue;
3275
3276 if (dev->pio_mode == 0xff) {
3277 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3278 rc = -EINVAL;
3279 goto out;
3280 }
3281
3282 dev->xfer_mode = dev->pio_mode;
3283 dev->xfer_shift = ATA_SHIFT_PIO;
3284 if (ap->ops->set_piomode)
3285 ap->ops->set_piomode(ap, dev);
3286 }
3287
3288 /* step 3: set host DMA timings */
3289 ata_link_for_each_dev(dev, link) {
3290 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3291 continue;
3292
3293 dev->xfer_mode = dev->dma_mode;
3294 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3295 if (ap->ops->set_dmamode)
3296 ap->ops->set_dmamode(ap, dev);
3297 }
3298
3299 /* step 4: update devices' xfer mode */
3300 ata_link_for_each_dev(dev, link) {
3301 /* don't update suspended devices' xfer mode */
3302 if (!ata_dev_enabled(dev))
3303 continue;
3304
3305 rc = ata_dev_set_mode(dev);
3306 if (rc)
3307 goto out;
3308 }
3309
3310 /* Record simplex status. If we selected DMA then the other
3311 * host channels are not permitted to do so.
3312 */
3313 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3314 ap->host->simplex_claimed = ap;
3315
3316 out:
3317 if (rc)
3318 *r_failed_dev = dev;
3319 return rc;
3320 }
3321
3322 /**
3323 * sata_link_debounce - debounce SATA phy status
3324 * @link: ATA link to debounce SATA phy status for
3325 * @params: timing parameters { interval, duration, timeout } in msec
3326 * @deadline: deadline jiffies for the operation
3327 *
3328 * Make sure SStatus of @link reaches a stable state, determined by
3329 * holding the same value (with DET not 1) for @duration, polled
3330 * every @interval, before @timeout. Timeout constrains the
3331 * beginning of the stable state. Because DET gets stuck at 1 on
3332 * some controllers after hot unplugging, this function waits
3333 * until timeout, then returns 0 if DET is stable at 1.
3334 *
3335 * @timeout is further limited by @deadline. The sooner of the
3336 * two is used.
3337 *
3338 * LOCKING:
3339 * Kernel thread context (may sleep)
3340 *
3341 * RETURNS:
3342 * 0 on success, -errno on failure.
3343 */
3344 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3345 unsigned long deadline)
3346 {
3347 unsigned long interval_msec = params[0];
3348 unsigned long duration = msecs_to_jiffies(params[1]);
3349 unsigned long last_jiffies, t;
3350 u32 last, cur;
3351 int rc;
3352
3353 t = jiffies + msecs_to_jiffies(params[2]);
3354 if (time_before(t, deadline))
3355 deadline = t;
3356
3357 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3358 return rc;
3359 cur &= 0xf;
3360
3361 last = cur;
3362 last_jiffies = jiffies;
3363
3364 while (1) {
3365 msleep(interval_msec);
3366 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3367 return rc;
3368 cur &= 0xf;
3369
3370 /* DET stable? */
3371 if (cur == last) {
3372 if (cur == 1 && time_before(jiffies, deadline))
3373 continue;
3374 if (time_after(jiffies, last_jiffies + duration))
3375 return 0;
3376 continue;
3377 }
3378
3379 /* unstable, start over */
3380 last = cur;
3381 last_jiffies = jiffies;
3382
3383 /* Check deadline. If debouncing failed, return
3384 * -EPIPE to tell upper layer to lower link speed.
3385 */
3386 if (time_after(jiffies, deadline))
3387 return -EPIPE;
3388 }
3389 }
3390
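/*
 * Worked example for sata_link_debounce() (parameter values here are
 * hypothetical): with params = { 20, 100, 2000 }, DET is sampled
 * every 20ms and the function returns 0 once the value has held
 * steady for 100ms (a DET stuck at 1 only counts as success after
 * the timeout expires).  If DET is still bouncing when the 2s
 * timeout or @deadline passes, -EPIPE tells the caller to lower the
 * link speed.
 */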
3391 /**
3392 * sata_link_resume - resume SATA link
3393 * @link: ATA link to resume SATA
3394 * @params: timing parameters { interval, duration, timeout } in msec
3395 * @deadline: deadline jiffies for the operation
3396 *
3397 * Resume SATA phy @link and debounce it.
3398 *
3399 * LOCKING:
3400 * Kernel thread context (may sleep)
3401 *
3402 * RETURNS:
3403 * 0 on success, -errno on failure.
3404 */
3405 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3406 unsigned long deadline)
3407 {
3408 u32 scontrol;
3409 int rc;
3410
3411 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3412 return rc;
3413
3414 scontrol = (scontrol & 0x0f0) | 0x300;
3415
3416 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3417 return rc;
3418
3419 /* Some PHYs react badly if SStatus is pounded immediately
3420 * after resuming. Delay 200ms before debouncing.
3421 */
3422 msleep(200);
3423
3424 return sata_link_debounce(link, params, deadline);
3425 }
3426
3427 /**
3428 * ata_sff_prereset - prepare for reset
3429 * @link: ATA link to be reset
3430 * @deadline: deadline jiffies for the operation
3431 *
3432 * @link is about to be reset. Initialize it. Failure from
3433 * prereset makes libata abort the whole reset sequence and give up
3434 * that port, so prereset should be best-effort. It does its
3435 * best to prepare for the reset sequence but if things go wrong, it
3436 * should just whine, not fail.
3437 *
3438 * LOCKING:
3439 * Kernel thread context (may sleep)
3440 *
3441 * RETURNS:
3442 * 0 on success, -errno otherwise.
3443 */
3444 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
3445 {
3446 struct ata_port *ap = link->ap;
3447 struct ata_eh_context *ehc = &link->eh_context;
3448 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3449 int rc;
3450
3451 /* if we're about to do hardreset, nothing more to do */
3452 if (ehc->i.action & ATA_EH_HARDRESET)
3453 return 0;
3454
3455 /* if SATA, resume link */
3456 if (ap->flags & ATA_FLAG_SATA) {
3457 rc = sata_link_resume(link, timing, deadline);
3458 /* whine about phy resume failure but proceed */
3459 if (rc && rc != -EOPNOTSUPP)
3460 ata_link_printk(link, KERN_WARNING, "failed to resume "
3461 "link for reset (errno=%d)\n", rc);
3462 }
3463
3464 /* wait for !BSY if we don't know that no device is attached */
3465 if (!ata_link_offline(link)) {
3466 rc = ata_sff_wait_ready(ap, deadline);
3467 if (rc && rc != -ENODEV) {
3468 ata_link_printk(link, KERN_WARNING, "device not ready "
3469 "(errno=%d), forcing hardreset\n", rc);
3470 ehc->i.action |= ATA_EH_HARDRESET;
3471 }
3472 }
3473
3474 return 0;
3475 }
3476
3477 /**
3478 * sata_link_hardreset - reset link via SATA phy reset
3479 * @link: link to reset
3480 * @timing: timing parameters { interval, duration, timeout } in msec
3481 * @deadline: deadline jiffies for the operation
3482 *
3483 * SATA phy-reset @link using DET bits of SControl register.
3484 *
3485 * LOCKING:
3486 * Kernel thread context (may sleep)
3487 *
3488 * RETURNS:
3489 * 0 on success, -errno otherwise.
3490 */
3491 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3492 unsigned long deadline)
3493 {
3494 u32 scontrol;
3495 int rc;
3496
3497 DPRINTK("ENTER\n");
3498
3499 if (sata_set_spd_needed(link)) {
3500 /* SATA spec says nothing about how to reconfigure
3501 * spd. To be on the safe side, turn off phy during
3502 * reconfiguration. This works for at least ICH7 AHCI
3503 * and Sil3124.
3504 */
3505 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3506 goto out;
3507
3508 scontrol = (scontrol & 0x0f0) | 0x304;
3509
3510 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3511 goto out;
3512
3513 sata_set_spd(link);
3514 }
3515
3516 /* issue phy wake/reset */
3517 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3518 goto out;
3519
3520 scontrol = (scontrol & 0x0f0) | 0x301;
3521
3522 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3523 goto out;
3524
3525 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3526 * 10.4.2 says at least 1 ms.
3527 */
3528 msleep(1);
3529
3530 /* bring link back */
3531 rc = sata_link_resume(link, timing, deadline);
3532 out:
3533 DPRINTK("EXIT, rc=%d\n", rc);
3534 return rc;
3535 }
3536
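/*
 * SControl decoding for the writes above, per the SATA spec: bits
 * 3:0 are DET, 7:4 SPD and 11:8 IPM.  The masks used by
 * sata_link_hardreset() and sata_link_resume() therefore mean:
 *
 *	(scontrol & 0x0f0) | 0x301	keep SPD; DET=1 issues COMRESET;
 *					IPM=3 forbids partial/slumber
 *	(scontrol & 0x0f0) | 0x304	keep SPD; DET=4 takes the phy offline
 *	(scontrol & 0x0f0) | 0x300	keep SPD; DET=0 resumes normal operation
 */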
3537 /**
3538 * ata_sff_postreset - standard postreset callback
3539 * @link: the target ata_link
3540 * @classes: classes of attached devices
3541 *
3542 * This function is invoked after a successful reset. Note that
3543 * the device might have been reset more than once using
3544 * different reset methods before postreset is invoked.
3545 *
3546 * LOCKING:
3547 * Kernel thread context (may sleep)
3548 */
3549 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
3550 {
3551 struct ata_port *ap = link->ap;
3552 u32 serror;
3553
3554 DPRINTK("ENTER\n");
3555
3556 /* print link status */
3557 sata_print_link_status(link);
3558
3559 /* clear SError */
3560 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3561 sata_scr_write(link, SCR_ERROR, serror);
3562 link->eh_info.serror = 0;
3563
3564 /* is double-select really necessary? */
3565 if (classes[0] != ATA_DEV_NONE)
3566 ap->ops->dev_select(ap, 1);
3567 if (classes[1] != ATA_DEV_NONE)
3568 ap->ops->dev_select(ap, 0);
3569
3570 /* bail out if no device is present */
3571 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3572 DPRINTK("EXIT, no device\n");
3573 return;
3574 }
3575
3576 /* set up device control */
3577 if (ap->ioaddr.ctl_addr)
3578 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3579
3580 DPRINTK("EXIT\n");
3581 }
3582
3583 /**
3584 * ata_dev_same_device - Determine whether new ID matches configured device
3585 * @dev: device to compare against
3586 * @new_class: class of the new device
3587 * @new_id: IDENTIFY page of the new device
3588 *
3589 * Compare @new_class and @new_id against @dev and determine
3590 * whether @dev is the device indicated by @new_class and
3591 * @new_id.
3592 *
3593 * LOCKING:
3594 * None.
3595 *
3596 * RETURNS:
3597 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3598 */
3599 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3600 const u16 *new_id)
3601 {
3602 const u16 *old_id = dev->id;
3603 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3604 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3605
3606 if (dev->class != new_class) {
3607 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3608 dev->class, new_class);
3609 return 0;
3610 }
3611
3612 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3613 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3614 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3615 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3616
3617 if (strcmp(model[0], model[1])) {
3618 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3619 "'%s' != '%s'\n", model[0], model[1]);
3620 return 0;
3621 }
3622
3623 if (strcmp(serial[0], serial[1])) {
3624 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3625 "'%s' != '%s'\n", serial[0], serial[1]);
3626 return 0;
3627 }
3628
3629 return 1;
3630 }
3631
3632 /**
3633 * ata_dev_reread_id - Re-read IDENTIFY data
3634 * @dev: target ATA device
3635 * @readid_flags: read ID flags
3636 *
3637 * Re-read IDENTIFY page and make sure @dev is still attached to
3638 * the port.
3639 *
3640 * LOCKING:
3641 * Kernel thread context (may sleep)
3642 *
3643 * RETURNS:
3644 * 0 on success, negative errno otherwise
3645 */
3646 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3647 {
3648 unsigned int class = dev->class;
3649 u16 *id = (void *)dev->link->ap->sector_buf;
3650 int rc;
3651
3652 /* read ID data */
3653 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3654 if (rc)
3655 return rc;
3656
3657 /* is the device still there? */
3658 if (!ata_dev_same_device(dev, class, id))
3659 return -ENODEV;
3660
3661 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3662 return 0;
3663 }
3664
3665 /**
3666 * ata_dev_revalidate - Revalidate ATA device
3667 * @dev: device to revalidate
3668 * @new_class: new class code
3669 * @readid_flags: read ID flags
3670 *
3671 * Re-read IDENTIFY page, make sure @dev is still attached to the
3672 * port and reconfigure it according to the new IDENTIFY page.
3673 *
3674 * LOCKING:
3675 * Kernel thread context (may sleep)
3676 *
3677 * RETURNS:
3678 * 0 on success, negative errno otherwise
3679 */
3680 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3681 unsigned int readid_flags)
3682 {
3683 u64 n_sectors = dev->n_sectors;
3684 int rc;
3685
3686 if (!ata_dev_enabled(dev))
3687 return -ENODEV;
3688
3689 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3690 if (ata_class_enabled(new_class) &&
3691 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3692 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3693 dev->class, new_class);
3694 rc = -ENODEV;
3695 goto fail;
3696 }
3697
3698 /* re-read ID */
3699 rc = ata_dev_reread_id(dev, readid_flags);
3700 if (rc)
3701 goto fail;
3702
3703 /* configure device according to the new ID */
3704 rc = ata_dev_configure(dev);
3705 if (rc)
3706 goto fail;
3707
3708 /* verify n_sectors hasn't changed */
3709 if (dev->class == ATA_DEV_ATA && n_sectors &&
3710 dev->n_sectors != n_sectors) {
3711 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3712 "%llu != %llu\n",
3713 (unsigned long long)n_sectors,
3714 (unsigned long long)dev->n_sectors);
3715
3716 /* restore original n_sectors */
3717 dev->n_sectors = n_sectors;
3718
3719 rc = -ENODEV;
3720 goto fail;
3721 }
3722
3723 return 0;
3724
3725 fail:
3726 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3727 return rc;
3728 }
3729
3730 struct ata_blacklist_entry {
3731 const char *model_num;
3732 const char *model_rev;
3733 unsigned long horkage;
3734 };
3735
3736 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3737 /* Devices with DMA related problems under Linux */
3738 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3739 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3740 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3741 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3742 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3743 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3744 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3745 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3746 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3747 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3748 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3749 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3750 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3751 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3752 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3753 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3754 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3755 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3756 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3757 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3758 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3759 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3760 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3761 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3762 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3763 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3764 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3765 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3766 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3767 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3768 /* Odd clown on sil3726/4726 PMPs */
3769 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3770 ATA_HORKAGE_SKIP_PM },
3771
3772 /* Weird ATAPI devices */
3773 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3774
3775 /* Devices we expect to fail diagnostics */
3776
3777 /* Devices where NCQ should be avoided */
3778 /* NCQ is slow */
3779 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3780 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3781 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3782 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3783 /* NCQ is broken */
3784 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3785 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3786 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3787 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3788
3789 /* Blacklist entries taken from Silicon Image 3124/3132
3790 Windows driver .inf file - also several Linux problem reports */
3791 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3792 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3793 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3794
3795 /* devices which puke on READ_NATIVE_MAX */
3796 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3797 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3798 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3799 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3800
3801 /* Devices which report 1 sector over size HPA */
3802 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3803 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3804 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
3805
3806 /* Devices which get the IVB wrong */
3807 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3808 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
3809 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
3810 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
3811 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
3812
3813 /* End Marker */
3814 { }
3815 };
3816
3817 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3818 {
3819 const char *p;
3820 int len;
3821
3822 /*
3823 * check for trailing wildcard: *\0
3824 */
3825 p = strchr(patt, wildchar);
3826 if (p && ((*(p + 1)) == 0))
3827 len = p - patt;
3828 else {
3829 len = strlen(name);
3830 if (!len) {
3831 if (!*patt)
3832 return 0;
3833 return -1;
3834 }
3835 }
3836
3837 return strncmp(patt, name, len);
3838 }
3839
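/*
 * Worked examples for strn_pattern_cmp() with wildchar '*', as used
 * by ata_dev_blacklisted() below:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*')   -> 0 (match)
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*')     -> 0 (match)
 *	strn_pattern_cmp("WDC AC11000H", "WDC AC22100H", '*') -> nonzero
 */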
3840 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3841 {
3842 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3843 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3844 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3845
3846 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3847 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3848
3849 while (ad->model_num) {
3850 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3851 if (ad->model_rev == NULL)
3852 return ad->horkage;
3853 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3854 return ad->horkage;
3855 }
3856 ad++;
3857 }
3858 return 0;
3859 }
3860
3861 static int ata_dma_blacklisted(const struct ata_device *dev)
3862 {
3863 /* We don't support polling DMA.
3864 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3865 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3866 */
3867 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3868 (dev->flags & ATA_DFLAG_CDB_INTR))
3869 return 1;
3870 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
3871 }
3872
3873 /**
3874 * ata_is_40wire - check drive side detection
3875 * @dev: device
3876 *
3877 * Perform drive side detection decoding, allowing for device vendors
3878 * who can't follow the documentation.
3879 */
3880
3881 static int ata_is_40wire(struct ata_device *dev)
3882 {
3883 if (dev->horkage & ATA_HORKAGE_IVB)
3884 return ata_drive_40wire_relaxed(dev->id);
3885 return ata_drive_40wire(dev->id);
3886 }
3887
3888 /**
3889 * cable_is_40wire - 40/80/SATA decider
3890 * @ap: port to consider
3891 *
3892 * This function encapsulates the policy for speed management
3893 * in one place. At the moment we don't cache the result but
3894 * there is a good case for setting ap->cbl to the result when
3895 * we are called with unknown cables (and figuring out if it
3896 * impacts hotplug at all).
3897 *
3898 * Return 1 if the cable appears to be 40 wire.
3899 */
3900
3901 static int cable_is_40wire(struct ata_port *ap)
3902 {
3903 struct ata_link *link;
3904 struct ata_device *dev;
3905
3906 /* If the controller thinks we are 40 wire, we are */
3907 if (ap->cbl == ATA_CBL_PATA40)
3908 return 1;
3909 /* If the controller thinks we are 80 wire, we are */
3910 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
3911 return 0;
3912 /* If the controller doesn't know, we scan the drives
3913
3914 - Note: We look for all 40 wire detects at this point.
3915 Any 80 wire detect is taken to mean an 80 wire cable
3916 because
3917 - In many setups only the one drive (slave if present)
3918 will give a valid detect
3919 - If you have a non-detect-capable drive you don't
3920 want it to colour the choice
3921 */
3922 ata_port_for_each_link(link, ap) {
3923 ata_link_for_each_dev(dev, link) {
3924 if (!ata_is_40wire(dev))
3925 return 0;
3926 }
3927 }
3928 return 1;
3929 }
3930
3931 /**
3932 * ata_dev_xfermask - Compute supported xfermask of the given device
3933 * @dev: Device to compute xfermask for
3934 *
3935 * Compute supported xfermask of @dev and store it in
3936 * dev->*_mask. This function is responsible for applying all
3937 * known limits including host controller limits, device
3938 * blacklist, etc...
3939 *
3940 * LOCKING:
3941 * None.
3942 */
3943 static void ata_dev_xfermask(struct ata_device *dev)
3944 {
3945 struct ata_link *link = dev->link;
3946 struct ata_port *ap = link->ap;
3947 struct ata_host *host = ap->host;
3948 unsigned long xfer_mask;
3949
3950 /* controller modes available */
3951 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3952 ap->mwdma_mask, ap->udma_mask);
3953
3954 /* drive modes available */
3955 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3956 dev->mwdma_mask, dev->udma_mask);
3957 xfer_mask &= ata_id_xfermask(dev->id);
3958
3959 /*
3960 * CFA Advanced TrueIDE timings are not allowed on a shared
3961 * cable
3962 */
3963 if (ata_dev_pair(dev)) {
3964 /* No PIO5 or PIO6 */
3965 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
3967 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3968 }
3969
3970 if (ata_dma_blacklisted(dev)) {
3971 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3972 ata_dev_printk(dev, KERN_WARNING,
3973 "device is on DMA blacklist, disabling DMA\n");
3974 }
3975
3976 if ((host->flags & ATA_HOST_SIMPLEX) &&
3977 host->simplex_claimed && host->simplex_claimed != ap) {
3978 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3979 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3980 "other device, disabling DMA\n");
3981 }
3982
3983 if (ap->flags & ATA_FLAG_NO_IORDY)
3984 xfer_mask &= ata_pio_mask_no_iordy(dev);
3985
3986 if (ap->ops->mode_filter)
3987 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3988
3989 /* Apply cable rule here. Don't apply it early because when
3990 * we handle hot plug the cable type can itself change.
3991 * Check this last so that we know if the transfer rate was
3992 * solely limited by the cable.
3993 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40-wire cable
	 * is safely being used at 80-wire speeds are not checked here.
3996 */
3997 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3998 /* UDMA/44 or higher would be available */
3999 if (cable_is_40wire(ap)) {
4000 ata_dev_printk(dev, KERN_WARNING,
4001 "limited to UDMA/33 due to 40-wire cable\n");
4002 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4003 }
4004
4005 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4006 &dev->mwdma_mask, &dev->udma_mask);
4007 }
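
/*
 * Editor's sketch of how the pack/unpack helpers used above fit
 * together (assuming the ATA_PIO4/ATA_MWDMA2/ATA_UDMA5 mode-mask
 * macros from <linux/ata.h>): the three per-type masks travel in one
 * unsigned long and can be split apart again losslessly.
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == ATA_PIO4, mwdma == ATA_MWDMA2, udma == ATA_UDMA5
 */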
4008
4009 /**
4010 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4011 * @dev: Device to which command will be sent
4012 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on the port it is attached to.
4015 *
4016 * LOCKING:
4017 * PCI/etc. bus probe sem.
4018 *
4019 * RETURNS:
4020 * 0 on success, AC_ERR_* mask otherwise.
4021 */
4022
4023 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4024 {
4025 struct ata_taskfile tf;
4026 unsigned int err_mask;
4027
4028 /* set up set-features taskfile */
4029 DPRINTK("set features - xfer mode\n");
4030
4031 /* Some controllers and ATAPI devices show flaky interrupt
4032 * behavior after setting xfer mode. Use polling instead.
4033 */
4034 ata_tf_init(dev, &tf);
4035 tf.command = ATA_CMD_SET_FEATURES;
4036 tf.feature = SETFEATURES_XFER;
4037 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4038 tf.protocol = ATA_PROT_NODATA;
4039 /* If we are using IORDY we must send the mode setting command */
4040 if (ata_pio_need_iordy(dev))
4041 tf.nsect = dev->xfer_mode;
4042 /* If the device has IORDY and the controller does not - turn it off */
4043 else if (ata_id_has_iordy(dev->id))
4044 tf.nsect = 0x01;
4045 else /* In the ancient relic department - skip all of this */
4046 return 0;
4047
4048 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4049
4050 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4051 return err_mask;
4052 }
4053 /**
4054 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4055 * @dev: Device to which command will be sent
4056 * @enable: Whether to enable or disable the feature
 *	@feature: The feature to set, passed in the sector count field
4058 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	with @feature in the sector count field.
4061 *
4062 * LOCKING:
4063 * PCI/etc. bus probe sem.
4064 *
4065 * RETURNS:
4066 * 0 on success, AC_ERR_* mask otherwise.
4067 */
4068 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4069 u8 feature)
4070 {
4071 struct ata_taskfile tf;
4072 unsigned int err_mask;
4073
4074 /* set up set-features taskfile */
4075 DPRINTK("set features - SATA features\n");
4076
4077 ata_tf_init(dev, &tf);
4078 tf.command = ATA_CMD_SET_FEATURES;
4079 tf.feature = enable;
4080 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4081 tf.protocol = ATA_PROT_NODATA;
4082 tf.nsect = feature;
4083
4084 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4085
4086 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4087 return err_mask;
4088 }
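
/*
 * Hedged usage sketch: enabling SATA Asynchronous Notification, for
 * example, boils down to the call below (SETFEATURES_SATA_ENABLE and
 * SATA_AN are assumed to come from <linux/ata.h>):
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			"failed to enable AN (err_mask=0x%x)\n", err_mask);
 */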
4089
4090 /**
4091 * ata_dev_init_params - Issue INIT DEV PARAMS command
4092 * @dev: Device to which command will be sent
4093 * @heads: Number of heads (taskfile parameter)
4094 * @sectors: Number of sectors (taskfile parameter)
4095 *
4096 * LOCKING:
4097 * Kernel thread context (may sleep)
4098 *
4099 * RETURNS:
4100 * 0 on success, AC_ERR_* mask otherwise.
4101 */
4102 static unsigned int ata_dev_init_params(struct ata_device *dev,
4103 u16 heads, u16 sectors)
4104 {
4105 struct ata_taskfile tf;
4106 unsigned int err_mask;
4107
4108 /* Number of sectors per track 1-255. Number of heads 1-16 */
4109 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4110 return AC_ERR_INVALID;
4111
4112 /* set up init dev params taskfile */
	DPRINTK("init dev params\n");
4114
4115 ata_tf_init(dev, &tf);
4116 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4117 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4118 tf.protocol = ATA_PROT_NODATA;
4119 tf.nsect = sectors;
4120 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4121
4122 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive;
	   continue, as we issue the setup based on the working geometry
	   the drive reported. */
4126 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4127 err_mask = 0;
4128
4129 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4130 return err_mask;
4131 }
4132
4133 /**
4134 * ata_sg_clean - Unmap DMA memory associated with command
4135 * @qc: Command containing DMA memory to be released
4136 *
4137 * Unmap all mapped DMA memory associated with this command.
4138 *
4139 * LOCKING:
4140 * spin_lock_irqsave(host lock)
4141 */
4142 void ata_sg_clean(struct ata_queued_cmd *qc)
4143 {
4144 struct ata_port *ap = qc->ap;
4145 struct scatterlist *sg = qc->sg;
4146 int dir = qc->dma_dir;
4147
4148 WARN_ON(sg == NULL);
4149
4150 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4151
4152 if (qc->n_elem)
4153 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4154
4155 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4156 qc->sg = NULL;
4157 }
4158
4159 /**
4160 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4161 * @qc: Metadata associated with taskfile to check
4162 *
4163 * Allow low-level driver to filter ATA PACKET commands, returning
4164 * a status indicating whether or not it is OK to use DMA for the
4165 * supplied PACKET command.
4166 *
4167 * LOCKING:
4168 * spin_lock_irqsave(host lock)
4169 *
 *	RETURNS: 0 when ATAPI DMA can be used,
 *		 nonzero otherwise
4172 */
4173 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4174 {
4175 struct ata_port *ap = qc->ap;
4176
	/* Don't allow DMA if the transfer length isn't a multiple of
	 * 16 bytes.  Quite a few ATAPI devices choke on such DMA
	 * requests.
	 */
4180 if (unlikely(qc->nbytes & 15))
4181 return 1;
4182
4183 if (ap->ops->check_atapi_dma)
4184 return ap->ops->check_atapi_dma(qc);
4185
4186 return 0;
4187 }
4188
4189 /**
4190 * ata_std_qc_defer - Check whether a qc needs to be deferred
4191 * @qc: ATA command in question
4192 *
4193 * Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As the upper layer only knows the queue depth, we are
4195 * responsible for maintaining exclusion. This function checks
4196 * whether a new command @qc can be issued.
4197 *
4198 * LOCKING:
4199 * spin_lock_irqsave(host lock)
4200 *
4201 * RETURNS:
4202 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4203 */
4204 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4205 {
4206 struct ata_link *link = qc->dev->link;
4207
4208 if (qc->tf.protocol == ATA_PROT_NCQ) {
4209 if (!ata_tag_valid(link->active_tag))
4210 return 0;
4211 } else {
4212 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4213 return 0;
4214 }
4215
4216 return ATA_DEFER_LINK;
4217 }
4218
4219 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4220
4221 /**
4222 * ata_sg_init - Associate command with scatter-gather table.
4223 * @qc: Command to be associated
4224 * @sg: Scatter-gather table.
4225 * @n_elem: Number of elements in s/g table.
4226 *
4227 * Initialize the data-related elements of queued_cmd @qc
4228 * to point to a scatter-gather table @sg, containing @n_elem
4229 * elements.
4230 *
4231 * LOCKING:
4232 * spin_lock_irqsave(host lock)
4233 */
4234 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4235 unsigned int n_elem)
4236 {
4237 qc->sg = sg;
4238 qc->n_elem = n_elem;
4239 qc->cursg = qc->sg;
4240 }
4241
4242 /**
4243 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4244 * @qc: Command with scatter-gather table to be mapped.
4245 *
4246 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4247 *
4248 * LOCKING:
4249 * spin_lock_irqsave(host lock)
4250 *
4251 * RETURNS:
4252 * Zero on success, negative on error.
4253 *
4254 */
4255 static int ata_sg_setup(struct ata_queued_cmd *qc)
4256 {
4257 struct ata_port *ap = qc->ap;
4258 unsigned int n_elem;
4259
4260 VPRINTK("ENTER, ata%u\n", ap->print_id);
4261
4262 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4263 if (n_elem < 1)
4264 return -1;
4265
4266 DPRINTK("%d sg elements mapped\n", n_elem);
4267
4268 qc->n_elem = n_elem;
4269 qc->flags |= ATA_QCFLAG_DMAMAP;
4270
4271 return 0;
4272 }
4273
4274 /**
4275 * swap_buf_le16 - swap halves of 16-bit words in place
4276 * @buf: Buffer to swap
4277 * @buf_words: Number of 16-bit words in buffer.
4278 *
4279 * Swap halves of 16-bit words if needed to convert from
4280 * little-endian byte order to native cpu byte order, or
4281 * vice-versa.
4282 *
4283 * LOCKING:
4284 * Inherited from caller.
4285 */
4286 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4287 {
4288 #ifdef __BIG_ENDIAN
4289 unsigned int i;
4290
4291 for (i = 0; i < buf_words; i++)
4292 buf[i] = le16_to_cpu(buf[i]);
4293 #endif /* __BIG_ENDIAN */
4294 }
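
/*
 * Illustrative use (editor's sketch): IDENTIFY DEVICE data arrives as
 * 256 little-endian words, so PIO-in paths on big-endian machines
 * byte-swap the raw buffer in place before parsing it (a no-op on
 * little-endian builds):
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	// ... read 512 bytes of IDENTIFY data into id[] ...
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */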
4295
4296 /**
4297 * ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port to allocate a command for
4300 *
4301 * LOCKING:
4302 * None.
4303 */
4304
4305 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4306 {
4307 struct ata_queued_cmd *qc = NULL;
4308 unsigned int i;
4309
4310 /* no command while frozen */
4311 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4312 return NULL;
4313
	/* the last tag is reserved for the internal command */
4315 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4316 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4317 qc = __ata_qc_from_tag(ap, i);
4318 break;
4319 }
4320
4321 if (qc)
4322 qc->tag = i;
4323
4324 return qc;
4325 }
4326
4327 /**
4328 * ata_qc_new_init - Request an available ATA command, and initialize it
4329 * @dev: Device from whom we request an available command structure
4330 *
4331 * LOCKING:
4332 * None.
4333 */
4334
4335 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4336 {
4337 struct ata_port *ap = dev->link->ap;
4338 struct ata_queued_cmd *qc;
4339
4340 qc = ata_qc_new(ap);
4341 if (qc) {
4342 qc->scsicmd = NULL;
4343 qc->ap = ap;
4344 qc->dev = dev;
4345
4346 ata_qc_reinit(qc);
4347 }
4348
4349 return qc;
4350 }
4351
4352 /**
4353 * ata_qc_free - free unused ata_queued_cmd
4354 * @qc: Command to complete
4355 *
4356 * Designed to free unused ata_queued_cmd object
4357 * in case something prevents using it.
4358 *
4359 * LOCKING:
4360 * spin_lock_irqsave(host lock)
4361 */
4362 void ata_qc_free(struct ata_queued_cmd *qc)
4363 {
4364 struct ata_port *ap = qc->ap;
4365 unsigned int tag;
4366
4367 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4368
4369 qc->flags = 0;
4370 tag = qc->tag;
4371 if (likely(ata_tag_valid(tag))) {
4372 qc->tag = ATA_TAG_POISON;
4373 clear_bit(tag, &ap->qc_allocated);
4374 }
4375 }
4376
4377 void __ata_qc_complete(struct ata_queued_cmd *qc)
4378 {
4379 struct ata_port *ap = qc->ap;
4380 struct ata_link *link = qc->dev->link;
4381
4382 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4383 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4384
4385 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4386 ata_sg_clean(qc);
4387
4388 /* command should be marked inactive atomically with qc completion */
4389 if (qc->tf.protocol == ATA_PROT_NCQ) {
4390 link->sactive &= ~(1 << qc->tag);
4391 if (!link->sactive)
4392 ap->nr_active_links--;
4393 } else {
4394 link->active_tag = ATA_TAG_POISON;
4395 ap->nr_active_links--;
4396 }
4397
4398 /* clear exclusive status */
4399 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4400 ap->excl_link == link))
4401 ap->excl_link = NULL;
4402
4403 /* atapi: mark qc as inactive to prevent the interrupt handler
4404 * from completing the command twice later, before the error handler
4405 * is called. (when rc != 0 and atapi request sense is needed)
4406 */
4407 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4408 ap->qc_active &= ~(1 << qc->tag);
4409
4410 /* call completion callback */
4411 qc->complete_fn(qc);
4412 }
4413
4414 static void fill_result_tf(struct ata_queued_cmd *qc)
4415 {
4416 struct ata_port *ap = qc->ap;
4417
4418 qc->result_tf.flags = qc->tf.flags;
4419 ap->ops->tf_read(ap, &qc->result_tf);
4420 }
4421
4422 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4423 {
4424 struct ata_device *dev = qc->dev;
4425
4426 if (ata_tag_internal(qc->tag))
4427 return;
4428
4429 if (ata_is_nodata(qc->tf.protocol))
4430 return;
4431
4432 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4433 return;
4434
4435 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4436 }
4437
4438 /**
4439 * ata_qc_complete - Complete an active ATA command
4440 * @qc: Command to complete
 *
4443 * Indicate to the mid and upper layers that an ATA
4444 * command has completed, with either an ok or not-ok status.
4445 *
4446 * LOCKING:
4447 * spin_lock_irqsave(host lock)
4448 */
4449 void ata_qc_complete(struct ata_queued_cmd *qc)
4450 {
4451 struct ata_port *ap = qc->ap;
4452
4453 /* XXX: New EH and old EH use different mechanisms to
4454 * synchronize EH with regular execution path.
4455 *
4456 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4457 * Normal execution path is responsible for not accessing a
4458 * failed qc. libata core enforces the rule by returning NULL
4459 * from ata_qc_from_tag() for failed qcs.
4460 *
4461 * Old EH depends on ata_qc_complete() nullifying completion
4462 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4463 * not synchronize with interrupt handler. Only PIO task is
4464 * taken care of.
4465 */
4466 if (ap->ops->error_handler) {
4467 struct ata_device *dev = qc->dev;
4468 struct ata_eh_info *ehi = &dev->link->eh_info;
4469
4470 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4471
4472 if (unlikely(qc->err_mask))
4473 qc->flags |= ATA_QCFLAG_FAILED;
4474
4475 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4476 if (!ata_tag_internal(qc->tag)) {
4477 /* always fill result TF for failed qc */
4478 fill_result_tf(qc);
4479 ata_qc_schedule_eh(qc);
4480 return;
4481 }
4482 }
4483
4484 /* read result TF if requested */
4485 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4486 fill_result_tf(qc);
4487
4488 /* Some commands need post-processing after successful
4489 * completion.
4490 */
4491 switch (qc->tf.command) {
4492 case ATA_CMD_SET_FEATURES:
4493 if (qc->tf.feature != SETFEATURES_WC_ON &&
4494 qc->tf.feature != SETFEATURES_WC_OFF)
4495 break;
4496 /* fall through */
4497 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4498 case ATA_CMD_SET_MULTI: /* multi_count changed */
4499 /* revalidate device */
4500 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4501 ata_port_schedule_eh(ap);
4502 break;
4503
4504 case ATA_CMD_SLEEP:
4505 dev->flags |= ATA_DFLAG_SLEEPING;
4506 break;
4507 }
4508
4509 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4510 ata_verify_xfer(qc);
4511
4512 __ata_qc_complete(qc);
4513 } else {
4514 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4515 return;
4516
4517 /* read result TF if failed or requested */
4518 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4519 fill_result_tf(qc);
4520
4521 __ata_qc_complete(qc);
4522 }
4523 }
4524
4525 /**
4526 * ata_qc_complete_multiple - Complete multiple qcs successfully
4527 * @ap: port in question
4528 * @qc_active: new qc_active mask
4529 * @finish_qc: LLDD callback invoked before completing a qc
4530 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from the low-level driver's interrupt routine to
 *	complete requests normally.  ap->qc_active and @qc_active are
 *	compared and commands are completed accordingly.
4535 *
4536 * LOCKING:
4537 * spin_lock_irqsave(host lock)
4538 *
4539 * RETURNS:
4540 * Number of completed commands on success, -errno otherwise.
4541 */
4542 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4543 void (*finish_qc)(struct ata_queued_cmd *))
4544 {
4545 int nr_done = 0;
4546 u32 done_mask;
4547 int i;
4548
4549 done_mask = ap->qc_active ^ qc_active;
4550
4551 if (unlikely(done_mask & qc_active)) {
4552 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4553 "(%08x->%08x)\n", ap->qc_active, qc_active);
4554 return -EINVAL;
4555 }
4556
4557 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4558 struct ata_queued_cmd *qc;
4559
4560 if (!(done_mask & (1 << i)))
4561 continue;
4562
4563 if ((qc = ata_qc_from_tag(ap, i))) {
4564 if (finish_qc)
4565 finish_qc(qc);
4566 ata_qc_complete(qc);
4567 nr_done++;
4568 }
4569 }
4570
4571 return nr_done;
4572 }
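
/*
 * Typical LLDD usage, sketched by the editor with a made-up register
 * name: from the interrupt handler, read the controller's "still
 * active tags" register and hand it in as the new mask, with the host
 * lock held:
 *
 *	u32 qc_active = readl(port_mmio + PORT_ACTIVE_TAGS);
 *
 *	nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);
 *
 * Bits set in ap->qc_active but clear in @qc_active are completed; a
 * bit newly appearing in @qc_active is an illegal transition and
 * fails with -EINVAL, as coded above.
 */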
4573
4574 /**
4575 * ata_qc_issue - issue taskfile to device
4576 * @qc: command to issue to device
4577 *
 *	Prepare an ATA command for submission to the device.
4579 * This includes mapping the data into a DMA-able
4580 * area, filling in the S/G table, and finally
4581 * writing the taskfile to hardware, starting the command.
4582 *
4583 * LOCKING:
4584 * spin_lock_irqsave(host lock)
4585 */
4586 void ata_qc_issue(struct ata_queued_cmd *qc)
4587 {
4588 struct ata_port *ap = qc->ap;
4589 struct ata_link *link = qc->dev->link;
4590 u8 prot = qc->tf.protocol;
4591
4592 /* Make sure only one non-NCQ command is outstanding. The
4593 * check is skipped for old EH because it reuses active qc to
4594 * request ATAPI sense.
4595 */
4596 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4597
4598 if (ata_is_ncq(prot)) {
4599 WARN_ON(link->sactive & (1 << qc->tag));
4600
4601 if (!link->sactive)
4602 ap->nr_active_links++;
4603 link->sactive |= 1 << qc->tag;
4604 } else {
4605 WARN_ON(link->sactive);
4606
4607 ap->nr_active_links++;
4608 link->active_tag = qc->tag;
4609 }
4610
4611 qc->flags |= ATA_QCFLAG_ACTIVE;
4612 ap->qc_active |= 1 << qc->tag;
4613
4614 /* We guarantee to LLDs that they will have at least one
4615 * non-zero sg if the command is a data command.
4616 */
4617 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4618
4619 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4620 (ap->flags & ATA_FLAG_PIO_DMA)))
4621 if (ata_sg_setup(qc))
4622 goto sg_err;
4623
4624 /* if device is sleeping, schedule reset and abort the link */
4625 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4626 link->eh_info.action |= ATA_EH_RESET;
4627 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4628 ata_link_abort(link);
4629 return;
4630 }
4631
4632 ap->ops->qc_prep(qc);
4633
4634 qc->err_mask |= ap->ops->qc_issue(qc);
4635 if (unlikely(qc->err_mask))
4636 goto err;
4637 return;
4638
4639 sg_err:
4640 qc->err_mask |= AC_ERR_SYSTEM;
4641 err:
4642 ata_qc_complete(qc);
4643 }
4644
4645 /**
4646 * sata_scr_valid - test whether SCRs are accessible
4647 * @link: ATA link to test SCR accessibility for
4648 *
4649 * Test whether SCRs are accessible for @link.
4650 *
4651 * LOCKING:
4652 * None.
4653 *
4654 * RETURNS:
4655 * 1 if SCRs are accessible, 0 otherwise.
4656 */
4657 int sata_scr_valid(struct ata_link *link)
4658 {
4659 struct ata_port *ap = link->ap;
4660
4661 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4662 }
4663
4664 /**
4665 * sata_scr_read - read SCR register of the specified port
4666 * @link: ATA link to read SCR for
4667 * @reg: SCR to read
4668 * @val: Place to store read value
4669 *
4670 * Read SCR register @reg of @link into *@val. This function is
4671 * guaranteed to succeed if @link is ap->link, the cable type of
4672 * the port is SATA and the port implements ->scr_read.
4673 *
4674 * LOCKING:
4675 * None if @link is ap->link. Kernel thread context otherwise.
4676 *
4677 * RETURNS:
4678 * 0 on success, negative errno on failure.
4679 */
4680 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4681 {
4682 if (ata_is_host_link(link)) {
4683 struct ata_port *ap = link->ap;
4684
4685 if (sata_scr_valid(link))
4686 return ap->ops->scr_read(ap, reg, val);
4687 return -EOPNOTSUPP;
4688 }
4689
4690 return sata_pmp_scr_read(link, reg, val);
4691 }
4692
4693 /**
4694 * sata_scr_write - write SCR register of the specified port
4695 * @link: ATA link to write SCR for
4696 * @reg: SCR to write
4697 * @val: value to write
4698 *
4699 * Write @val to SCR register @reg of @link. This function is
4700 * guaranteed to succeed if @link is ap->link, the cable type of
4701 * the port is SATA and the port implements ->scr_read.
4702 *
4703 * LOCKING:
4704 * None if @link is ap->link. Kernel thread context otherwise.
4705 *
4706 * RETURNS:
4707 * 0 on success, negative errno on failure.
4708 */
4709 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4710 {
4711 if (ata_is_host_link(link)) {
4712 struct ata_port *ap = link->ap;
4713
4714 if (sata_scr_valid(link))
4715 return ap->ops->scr_write(ap, reg, val);
4716 return -EOPNOTSUPP;
4717 }
4718
4719 return sata_pmp_scr_write(link, reg, val);
4720 }
4721
4722 /**
4723 * sata_scr_write_flush - write SCR register of the specified port and flush
4724 * @link: ATA link to write SCR for
4725 * @reg: SCR to write
4726 * @val: value to write
4727 *
4728 * This function is identical to sata_scr_write() except that this
 *	function performs a read-back flush after writing to the register.
4730 *
4731 * LOCKING:
4732 * None if @link is ap->link. Kernel thread context otherwise.
4733 *
4734 * RETURNS:
4735 * 0 on success, negative errno on failure.
4736 */
4737 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4738 {
4739 if (ata_is_host_link(link)) {
4740 struct ata_port *ap = link->ap;
4741 int rc;
4742
4743 if (sata_scr_valid(link)) {
4744 rc = ap->ops->scr_write(ap, reg, val);
4745 if (rc == 0)
4746 rc = ap->ops->scr_read(ap, reg, &val);
4747 return rc;
4748 }
4749 return -EOPNOTSUPP;
4750 }
4751
4752 return sata_pmp_scr_write(link, reg, val);
4753 }
4754
4755 /**
4756 * ata_link_online - test whether the given link is online
4757 * @link: ATA link to test
4758 *
4759 * Test whether @link is online. Note that this function returns
4760 * 0 if online status of @link cannot be obtained, so
4761 * ata_link_online(link) != !ata_link_offline(link).
4762 *
4763 * LOCKING:
4764 * None.
4765 *
4766 * RETURNS:
4767 * 1 if the port online status is available and online.
4768 */
4769 int ata_link_online(struct ata_link *link)
4770 {
4771 u32 sstatus;
4772
4773 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4774 (sstatus & 0xf) == 0x3)
4775 return 1;
4776 return 0;
4777 }
4778
4779 /**
4780 * ata_link_offline - test whether the given link is offline
4781 * @link: ATA link to test
4782 *
4783 * Test whether @link is offline. Note that this function
4784 * returns 0 if offline status of @link cannot be obtained, so
4785 * ata_link_online(link) != !ata_link_offline(link).
4786 *
4787 * LOCKING:
4788 * None.
4789 *
4790 * RETURNS:
4791 * 1 if the port offline status is available and offline.
4792 */
4793 int ata_link_offline(struct ata_link *link)
4794 {
4795 u32 sstatus;
4796
4797 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4798 (sstatus & 0xf) != 0x3)
4799 return 1;
4800 return 0;
4801 }
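
/*
 * For reference, both helpers above decode the DET field (bits 3:0)
 * of SStatus, where 0x3 means "device present, PHY communication
 * established".  An open-coded check would look like this:
 *
 *	u32 sstatus;
 *	int phy_up = 0;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		phy_up = (sstatus & 0xf) == 0x3;
 */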
4802
4803 #ifdef CONFIG_PM
4804 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4805 unsigned int action, unsigned int ehi_flags,
4806 int wait)
4807 {
4808 unsigned long flags;
4809 int i, rc;
4810
4811 for (i = 0; i < host->n_ports; i++) {
4812 struct ata_port *ap = host->ports[i];
4813 struct ata_link *link;
4814
4815 /* Previous resume operation might still be in
4816 * progress. Wait for PM_PENDING to clear.
4817 */
4818 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4819 ata_port_wait_eh(ap);
4820 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4821 }
4822
4823 /* request PM ops to EH */
4824 spin_lock_irqsave(ap->lock, flags);
4825
4826 ap->pm_mesg = mesg;
4827 if (wait) {
4828 rc = 0;
4829 ap->pm_result = &rc;
4830 }
4831
4832 ap->pflags |= ATA_PFLAG_PM_PENDING;
4833 __ata_port_for_each_link(link, ap) {
4834 link->eh_info.action |= action;
4835 link->eh_info.flags |= ehi_flags;
4836 }
4837
4838 ata_port_schedule_eh(ap);
4839
4840 spin_unlock_irqrestore(ap->lock, flags);
4841
4842 /* wait and check result */
4843 if (wait) {
4844 ata_port_wait_eh(ap);
4845 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4846 if (rc)
4847 return rc;
4848 }
4849 }
4850
4851 return 0;
4852 }
4853
4854 /**
4855 * ata_host_suspend - suspend host
4856 * @host: host to suspend
4857 * @mesg: PM message
4858 *
4859 * Suspend @host. Actual operation is performed by EH. This
4860 * function requests EH to perform PM operations and waits for EH
4861 * to finish.
4862 *
4863 * LOCKING:
4864 * Kernel thread context (may sleep).
4865 *
4866 * RETURNS:
4867 * 0 on success, -errno on failure.
4868 */
4869 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
4870 {
4871 int rc;
4872
4873 /*
4874 * disable link pm on all ports before requesting
4875 * any pm activity
4876 */
4877 ata_lpm_enable(host);
4878
4879 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
4880 if (rc == 0)
4881 host->dev->power.power_state = mesg;
4882 return rc;
4883 }
4884
4885 /**
4886 * ata_host_resume - resume host
4887 * @host: host to resume
4888 *
4889 * Resume @host. Actual operation is performed by EH. This
4890 * function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
4892 *
4893 * LOCKING:
4894 * Kernel thread context (may sleep).
4895 */
4896 void ata_host_resume(struct ata_host *host)
4897 {
4898 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
4899 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
4900 host->dev->power.power_state = PMSG_ON;
4901
4902 /* reenable link pm */
4903 ata_lpm_disable(host);
4904 }
4905 #endif
4906
4907 /**
4908 * ata_port_start - Set port up for dma.
4909 * @ap: Port to initialize
4910 *
4911 * Called just after data structures for each port are
4912 * initialized. Allocates space for PRD table.
4913 *
4914 * May be used as the port_start() entry in ata_port_operations.
4915 *
4916 * LOCKING:
4917 * Inherited from caller.
4918 */
4919 int ata_port_start(struct ata_port *ap)
4920 {
4921 struct device *dev = ap->dev;
4922
4923 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
4924 GFP_KERNEL);
4925 if (!ap->prd)
4926 return -ENOMEM;
4927
4928 return 0;
4929 }
4930
4931 /**
4932 * ata_dev_init - Initialize an ata_device structure
4933 * @dev: Device structure to initialize
4934 *
4935 * Initialize @dev in preparation for probing.
4936 *
4937 * LOCKING:
4938 * Inherited from caller.
4939 */
4940 void ata_dev_init(struct ata_device *dev)
4941 {
4942 struct ata_link *link = dev->link;
4943 struct ata_port *ap = link->ap;
4944 unsigned long flags;
4945
4946 /* SATA spd limit is bound to the first device */
4947 link->sata_spd_limit = link->hw_sata_spd_limit;
4948 link->sata_spd = 0;
4949
4950 /* High bits of dev->flags are used to record warm plug
4951 * requests which occur asynchronously. Synchronize using
4952 * host lock.
4953 */
4954 spin_lock_irqsave(ap->lock, flags);
4955 dev->flags &= ~ATA_DFLAG_INIT_MASK;
4956 dev->horkage = 0;
4957 spin_unlock_irqrestore(ap->lock, flags);
4958
4959 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
4960 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
4961 dev->pio_mask = UINT_MAX;
4962 dev->mwdma_mask = UINT_MAX;
4963 dev->udma_mask = UINT_MAX;
4964 }
4965
4966 /**
4967 * ata_link_init - Initialize an ata_link structure
4968 * @ap: ATA port link is attached to
4969 * @link: Link structure to initialize
4970 * @pmp: Port multiplier port number
4971 *
4972 * Initialize @link.
4973 *
4974 * LOCKING:
4975 * Kernel thread context (may sleep)
4976 */
4977 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4978 {
4979 int i;
4980
4981 /* clear everything except for devices */
4982 memset(link, 0, offsetof(struct ata_link, device[0]));
4983
4984 link->ap = ap;
4985 link->pmp = pmp;
4986 link->active_tag = ATA_TAG_POISON;
4987 link->hw_sata_spd_limit = UINT_MAX;
4988
4989 /* can't use iterator, ap isn't initialized yet */
4990 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4991 struct ata_device *dev = &link->device[i];
4992
4993 dev->link = link;
4994 dev->devno = dev - link->device;
4995 ata_dev_init(dev);
4996 }
4997 }
4998
4999 /**
5000 * sata_link_init_spd - Initialize link->sata_spd_limit
5001 * @link: Link to configure sata_spd_limit for
5002 *
5003 * Initialize @link->[hw_]sata_spd_limit to the currently
5004 * configured value.
5005 *
5006 * LOCKING:
5007 * Kernel thread context (may sleep).
5008 *
5009 * RETURNS:
5010 * 0 on success, -errno on failure.
5011 */
5012 int sata_link_init_spd(struct ata_link *link)
5013 {
5014 u32 scontrol;
5015 u8 spd;
5016 int rc;
5017
5018 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5019 if (rc)
5020 return rc;
5021
5022 spd = (scontrol >> 4) & 0xf;
5023 if (spd)
5024 link->hw_sata_spd_limit &= (1 << spd) - 1;
5025
5026 ata_force_spd_limit(link);
5027
5028 link->sata_spd_limit = link->hw_sata_spd_limit;
5029
5030 return 0;
5031 }
5032
5033 /**
5034 * ata_port_alloc - allocate and initialize basic ATA port resources
5035 * @host: ATA host this allocated port belongs to
5036 *
5037 * Allocate and initialize basic ATA port resources.
5038 *
5039 * RETURNS:
 *	Allocated ATA port on success, NULL on failure.
5041 *
5042 * LOCKING:
5043 * Inherited from calling layer (may sleep).
5044 */
5045 struct ata_port *ata_port_alloc(struct ata_host *host)
5046 {
5047 struct ata_port *ap;
5048
5049 DPRINTK("ENTER\n");
5050
5051 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5052 if (!ap)
5053 return NULL;
5054
5055 ap->pflags |= ATA_PFLAG_INITIALIZING;
5056 ap->lock = &host->lock;
5057 ap->flags = ATA_FLAG_DISABLED;
5058 ap->print_id = -1;
5059 ap->ctl = ATA_DEVCTL_OBS;
5060 ap->host = host;
5061 ap->dev = host->dev;
5062 ap->last_ctl = 0xFF;
5063
5064 #if defined(ATA_VERBOSE_DEBUG)
5065 /* turn on all debugging levels */
5066 ap->msg_enable = 0x00FF;
5067 #elif defined(ATA_DEBUG)
5068 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5069 #else
5070 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5071 #endif
5072
5073 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5074 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5075 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5076 INIT_LIST_HEAD(&ap->eh_done_q);
5077 init_waitqueue_head(&ap->eh_wait_q);
5078 init_timer_deferrable(&ap->fastdrain_timer);
5079 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5080 ap->fastdrain_timer.data = (unsigned long)ap;
5081
5082 ap->cbl = ATA_CBL_NONE;
5083
5084 ata_link_init(ap, &ap->link, 0);
5085
5086 #ifdef ATA_IRQ_TRAP
5087 ap->stats.unhandled_irq = 1;
5088 ap->stats.idle_irq = 1;
5089 #endif
5090 return ap;
5091 }
5092
5093 static void ata_host_release(struct device *gendev, void *res)
5094 {
5095 struct ata_host *host = dev_get_drvdata(gendev);
5096 int i;
5097
5098 for (i = 0; i < host->n_ports; i++) {
5099 struct ata_port *ap = host->ports[i];
5100
5101 if (!ap)
5102 continue;
5103
5104 if (ap->scsi_host)
5105 scsi_host_put(ap->scsi_host);
5106
5107 kfree(ap->pmp_link);
5108 kfree(ap);
5109 host->ports[i] = NULL;
5110 }
5111
5112 dev_set_drvdata(gendev, NULL);
5113 }
5114
5115 /**
5116 * ata_host_alloc - allocate and init basic ATA host resources
5117 * @dev: generic device this host is associated with
5118 * @max_ports: maximum number of ATA ports associated with this host
5119 *
5120 * Allocate and initialize basic ATA host resources. LLD calls
5121 * this function to allocate a host, initializes it fully and
5122 * attaches it using ata_host_register().
5123 *
5124 * @max_ports ports are allocated and host->n_ports is
5125 * initialized to @max_ports. The caller is allowed to decrease
5126 * host->n_ports before calling ata_host_register(). The unused
5127 * ports will be automatically freed on registration.
5128 *
5129 * RETURNS:
 *	Allocated ATA host on success, NULL on failure.
5131 *
5132 * LOCKING:
5133 * Inherited from calling layer (may sleep).
5134 */
5135 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5136 {
5137 struct ata_host *host;
5138 size_t sz;
5139 int i;
5140
5141 DPRINTK("ENTER\n");
5142
5143 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5144 return NULL;
5145
5146 /* alloc a container for our list of ATA ports (buses) */
5147 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5149 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5150 if (!host)
5151 goto err_out;
5152
5153 devres_add(dev, host);
5154 dev_set_drvdata(dev, host);
5155
5156 spin_lock_init(&host->lock);
5157 host->dev = dev;
5158 host->n_ports = max_ports;
5159
5160 /* allocate ports bound to this host */
5161 for (i = 0; i < max_ports; i++) {
5162 struct ata_port *ap;
5163
5164 ap = ata_port_alloc(host);
5165 if (!ap)
5166 goto err_out;
5167
5168 ap->port_no = i;
5169 host->ports[i] = ap;
5170 }
5171
5172 devres_remove_group(dev, NULL);
5173 return host;
5174
5175 err_out:
5176 devres_release_group(dev, NULL);
5177 return NULL;
5178 }
5179
5180 /**
5181 * ata_host_alloc_pinfo - alloc host and init with port_info array
5182 * @dev: generic device this host is associated with
5183 * @ppi: array of ATA port_info to initialize host with
5184 * @n_ports: number of ATA ports attached to this host
5185 *
5186 * Allocate ATA host and initialize with info from @ppi. If NULL
5187 * terminated, @ppi may contain fewer entries than @n_ports. The
5188 * last entry will be used for the remaining ports.
5189 *
5190 * RETURNS:
 *	Allocated ATA host on success, NULL on failure.
5192 *
5193 * LOCKING:
5194 * Inherited from calling layer (may sleep).
5195 */
5196 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5197 const struct ata_port_info * const * ppi,
5198 int n_ports)
5199 {
5200 const struct ata_port_info *pi;
5201 struct ata_host *host;
5202 int i, j;
5203
5204 host = ata_host_alloc(dev, n_ports);
5205 if (!host)
5206 return NULL;
5207
5208 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5209 struct ata_port *ap = host->ports[i];
5210
5211 if (ppi[j])
5212 pi = ppi[j++];
5213
5214 ap->pio_mask = pi->pio_mask;
5215 ap->mwdma_mask = pi->mwdma_mask;
5216 ap->udma_mask = pi->udma_mask;
5217 ap->flags |= pi->flags;
5218 ap->link.flags |= pi->link_flags;
5219 ap->ops = pi->port_ops;
5220
5221 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5222 host->ops = pi->port_ops;
5223 }
5224
5225 return host;
5226 }
5227
5228 static void ata_host_stop(struct device *gendev, void *res)
5229 {
5230 struct ata_host *host = dev_get_drvdata(gendev);
5231 int i;
5232
5233 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5234
5235 for (i = 0; i < host->n_ports; i++) {
5236 struct ata_port *ap = host->ports[i];
5237
5238 if (ap->ops->port_stop)
5239 ap->ops->port_stop(ap);
5240 }
5241
5242 if (host->ops->host_stop)
5243 host->ops->host_stop(host);
5244 }
5245
5246 /**
5247 * ata_finalize_port_ops - finalize ata_port_operations
5248 * @ops: ata_port_operations to finalize
5249 *
5250 * An ata_port_operations can inherit from another ops and that
5251 * ops can again inherit from another. This can go on as many
5252 * times as necessary as long as there is no loop in the
5253 * inheritance chain.
5254 *
5255 * Ops tables are finalized when the host is started. NULL or
 *	unspecified entries are inherited from the closest ancestor
 *	that has the method, and the entry is populated with it.
5258 * After finalization, the ops table directly points to all the
5259 * methods and ->inherits is no longer necessary and cleared.
5260 *
5261 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5262 *
5263 * LOCKING:
5264 * None.
5265 */
5266 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5267 {
5268 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
5269 const struct ata_port_operations *cur;
5270 void **begin = (void **)ops;
5271 void **end = (void **)&ops->inherits;
5272 void **pp;
5273
5274 if (!ops || !ops->inherits)
5275 return;
5276
5277 spin_lock(&lock);
5278
5279 for (cur = ops->inherits; cur; cur = cur->inherits) {
5280 void **inherit = (void **)cur;
5281
5282 for (pp = begin; pp < end; pp++, inherit++)
5283 if (!*pp)
5284 *pp = *inherit;
5285 }
5286
5287 for (pp = begin; pp < end; pp++)
5288 if (IS_ERR(*pp))
5289 *pp = NULL;
5290
5291 ops->inherits = NULL;
5292
5293 spin_unlock(&lock);
5294 }
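
/*
 * Inheritance sketch with a hypothetical LLDD: an ops table fills in
 * only the methods it overrides and names its parent via .inherits;
 * ata_finalize_port_ops() flattens the chain at host start time.
 *
 *	static struct ata_port_operations my_lld_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.qc_issue	= my_lld_qc_issue,	// override
 *		.freeze		= ATA_OP_NULL,		// force to NULL
 *	};
 */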
5295
5296 /**
5297 * ata_host_start - start and freeze ports of an ATA host
5298 * @host: ATA host to start ports for
5299 *
5300 * Start and then freeze ports of @host. Started status is
5301 * recorded in host->flags, so this function can be called
5302 * multiple times. Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
5304 * first non-dummy port ops.
5305 *
5306 * LOCKING:
5307 * Inherited from calling layer (may sleep).
5308 *
5309 * RETURNS:
5310 * 0 if all ports are started successfully, -errno otherwise.
5311 */
5312 int ata_host_start(struct ata_host *host)
5313 {
5314 int have_stop = 0;
5315 void *start_dr = NULL;
5316 int i, rc;
5317
5318 if (host->flags & ATA_HOST_STARTED)
5319 return 0;
5320
5321 ata_finalize_port_ops(host->ops);
5322
5323 for (i = 0; i < host->n_ports; i++) {
5324 struct ata_port *ap = host->ports[i];
5325
5326 ata_finalize_port_ops(ap->ops);
5327
5328 if (!host->ops && !ata_port_is_dummy(ap))
5329 host->ops = ap->ops;
5330
5331 if (ap->ops->port_stop)
5332 have_stop = 1;
5333 }
5334
5335 if (host->ops->host_stop)
5336 have_stop = 1;
5337
5338 if (have_stop) {
5339 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5340 if (!start_dr)
5341 return -ENOMEM;
5342 }
5343
5344 for (i = 0; i < host->n_ports; i++) {
5345 struct ata_port *ap = host->ports[i];
5346
5347 if (ap->ops->port_start) {
5348 rc = ap->ops->port_start(ap);
5349 if (rc) {
5350 if (rc != -ENODEV)
5351 dev_printk(KERN_ERR, host->dev,
5352 "failed to start port %d "
5353 "(errno=%d)\n", i, rc);
5354 goto err_out;
5355 }
5356 }
5357 ata_eh_freeze_port(ap);
5358 }
5359
5360 if (start_dr)
5361 devres_add(host->dev, start_dr);
5362 host->flags |= ATA_HOST_STARTED;
5363 return 0;
5364
5365 err_out:
5366 while (--i >= 0) {
5367 struct ata_port *ap = host->ports[i];
5368
5369 if (ap->ops->port_stop)
5370 ap->ops->port_stop(ap);
5371 }
5372 devres_free(start_dr);
5373 return rc;
5374 }
5375
5376 /**
 *	ata_host_init - Initialize a host struct
5378 * @host: host to initialize
5379 * @dev: device host is attached to
5380 * @flags: host flags
5381 * @ops: port_ops
5382 *
5383 * LOCKING:
5384 * PCI/etc. bus probe sem.
5385 *
5386 */
5387 /* KILLME - the only user left is ipr */
5388 void ata_host_init(struct ata_host *host, struct device *dev,
5389 unsigned long flags, struct ata_port_operations *ops)
5390 {
5391 spin_lock_init(&host->lock);
5392 host->dev = dev;
5393 host->flags = flags;
5394 host->ops = ops;
5395 }
5396
5397 /**
5398 * ata_host_register - register initialized ATA host
5399 * @host: ATA host to register
5400 * @sht: template for SCSI host
5401 *
5402 * Register initialized ATA host. @host is allocated using
5403 * ata_host_alloc() and fully initialized by LLD. This function
5404 * starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
5406 *
5407 * LOCKING:
5408 * Inherited from calling layer (may sleep).
5409 *
5410 * RETURNS:
5411 * 0 on success, -errno otherwise.
5412 */
5413 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5414 {
5415 int i, rc;
5416
5417 /* host must have been started */
5418 if (!(host->flags & ATA_HOST_STARTED)) {
5419 dev_printk(KERN_ERR, host->dev,
5420 "BUG: trying to register unstarted host\n");
5421 WARN_ON(1);
5422 return -EINVAL;
5423 }
5424
	/* Blow away unused ports.  This happens when the LLD can't
5426 * determine the exact number of ports to allocate at
5427 * allocation time.
5428 */
5429 for (i = host->n_ports; host->ports[i]; i++)
5430 kfree(host->ports[i]);
5431
5432 /* give ports names and add SCSI hosts */
5433 for (i = 0; i < host->n_ports; i++)
5434 host->ports[i]->print_id = ata_print_id++;
5435
5436 rc = ata_scsi_add_hosts(host, sht);
5437 if (rc)
5438 return rc;
5439
5440 /* associate with ACPI nodes */
5441 ata_acpi_associate(host);
5442
5443 /* set cable, sata_spd_limit and report */
5444 for (i = 0; i < host->n_ports; i++) {
5445 struct ata_port *ap = host->ports[i];
5446 unsigned long xfer_mask;
5447
5448 /* set SATA cable type if still unset */
5449 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5450 ap->cbl = ATA_CBL_SATA;
5451
5452 /* init sata_spd_limit to the current value */
5453 sata_link_init_spd(&ap->link);
5454
5455 /* print per-port info to dmesg */
5456 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5457 ap->udma_mask);
5458
5459 if (!ata_port_is_dummy(ap)) {
5460 ata_port_printk(ap, KERN_INFO,
5461 "%cATA max %s %s\n",
5462 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5463 ata_mode_string(xfer_mask),
5464 ap->link.eh_info.desc);
5465 ata_ehi_clear_desc(&ap->link.eh_info);
5466 } else
5467 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5468 }
5469
5470 /* perform each probe synchronously */
5471 DPRINTK("probe begin\n");
5472 for (i = 0; i < host->n_ports; i++) {
5473 struct ata_port *ap = host->ports[i];
5474
5475 /* probe */
5476 if (ap->ops->error_handler) {
5477 struct ata_eh_info *ehi = &ap->link.eh_info;
5478 unsigned long flags;
5479
5480 ata_port_probe(ap);
5481
5482 /* kick EH for boot probing */
5483 spin_lock_irqsave(ap->lock, flags);
5484
5485 ehi->probe_mask |= ATA_ALL_DEVICES;
5486 ehi->action |= ATA_EH_RESET;
5487 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5488
5489 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5490 ap->pflags |= ATA_PFLAG_LOADING;
5491 ata_port_schedule_eh(ap);
5492
5493 spin_unlock_irqrestore(ap->lock, flags);
5494
5495 /* wait for EH to finish */
5496 ata_port_wait_eh(ap);
5497 } else {
5498 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5499 rc = ata_bus_probe(ap);
5500 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5501
5502 if (rc) {
5503 /* FIXME: do something useful here?
5504 * Current libata behavior will
5505 * tear down everything when
5506 * the module is removed
5507 * or the h/w is unplugged.
5508 */
5509 }
5510 }
5511 }
5512
5513 /* probes are done, now scan each port's disk(s) */
5514 DPRINTK("host probe begin\n");
5515 for (i = 0; i < host->n_ports; i++) {
5516 struct ata_port *ap = host->ports[i];
5517
5518 ata_scsi_scan_host(ap, 1);
5519 ata_lpm_schedule(ap, ap->pm_policy);
5520 }
5521
5522 return 0;
5523 }
5524
5525 /**
5526 * ata_host_activate - start host, request IRQ and register it
5527 * @host: target ATA host
5528 * @irq: IRQ to request
5529 * @irq_handler: irq_handler used when requesting IRQ
5530 * @irq_flags: irq_flags used when requesting IRQ
5531 * @sht: scsi_host_template to use when registering the host
5532 *
5533 * After allocating an ATA host and initializing it, most libata
5534 * LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes the necessary
5536 * arguments and performs the three steps in one go.
5537 *
5538 * An invalid IRQ skips the IRQ registration and expects the host to
5539 * have set polling mode on the port. In this case, @irq_handler
5540 * should be NULL.
5541 *
5542 * LOCKING:
5543 * Inherited from calling layer (may sleep).
5544 *
5545 * RETURNS:
5546 * 0 on success, -errno otherwise.
5547 */
5548 int ata_host_activate(struct ata_host *host, int irq,
5549 irq_handler_t irq_handler, unsigned long irq_flags,
5550 struct scsi_host_template *sht)
5551 {
5552 int i, rc;
5553
5554 rc = ata_host_start(host);
5555 if (rc)
5556 return rc;
5557
5558 /* Special case for polling mode */
5559 if (!irq) {
5560 WARN_ON(irq_handler);
5561 return ata_host_register(host, sht);
5562 }
5563
5564 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5565 dev_driver_string(host->dev), host);
5566 if (rc)
5567 return rc;
5568
5569 for (i = 0; i < host->n_ports; i++)
5570 ata_port_desc(host->ports[i], "irq %d", irq);
5571
5572 rc = ata_host_register(host, sht);
5573 /* if failed, just free the IRQ and leave ports alone */
5574 if (rc)
5575 devm_free_irq(host->dev, irq, host);
5576
5577 return rc;
5578 }
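
/*
 * Putting the pieces together, a PCI LLDD probe path is sketched
 * below (names are hypothetical; a real driver maps its BARs and
 * fills in each port's ioaddr before activating):
 *
 *	static int my_lld_init_one(struct pci_dev *pdev,
 *				   const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_lld_port_info, NULL };
 *		struct ata_host *host;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *		// ... map BARs, set up ap->ioaddr for both ports ...
 *		return ata_host_activate(host, pdev->irq, my_lld_interrupt,
 *					 IRQF_SHARED, &my_lld_sht);
 *	}
 */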
5579
5580 /**
 *	ata_port_detach - Detach ATA port in preparation of device removal
5582 * @ap: ATA port to be detached
5583 *
5584 * Detach all ATA devices and the associated SCSI devices of @ap;
5585 * then, remove the associated SCSI host. @ap is guaranteed to
5586 * be quiescent on return from this function.
5587 *
5588 * LOCKING:
5589 * Kernel thread context (may sleep).
5590 */
5591 static void ata_port_detach(struct ata_port *ap)
5592 {
5593 unsigned long flags;
5594 struct ata_link *link;
5595 struct ata_device *dev;
5596
5597 if (!ap->ops->error_handler)
5598 goto skip_eh;
5599
5600 /* tell EH we're leaving & flush EH */
5601 spin_lock_irqsave(ap->lock, flags);
5602 ap->pflags |= ATA_PFLAG_UNLOADING;
5603 spin_unlock_irqrestore(ap->lock, flags);
5604
5605 ata_port_wait_eh(ap);
5606
5607 /* EH is now guaranteed to see UNLOADING - EH context belongs
5608 * to us. Disable all existing devices.
5609 */
5610 ata_port_for_each_link(link, ap) {
5611 ata_link_for_each_dev(dev, link)
5612 ata_dev_disable(dev);
5613 }
5614
5615 /* Final freeze & EH. All in-flight commands are aborted. EH
	 * will be skipped and retries will be terminated with a bad
	 * target.
5618 */
5619 spin_lock_irqsave(ap->lock, flags);
5620 ata_port_freeze(ap); /* won't be thawed */
5621 spin_unlock_irqrestore(ap->lock, flags);
5622
5623 ata_port_wait_eh(ap);
5624 cancel_rearming_delayed_work(&ap->hotplug_task);
5625
5626 skip_eh:
5627 /* remove the associated SCSI host */
5628 scsi_remove_host(ap->scsi_host);
5629 }
5630
5631 /**
5632 * ata_host_detach - Detach all ports of an ATA host
5633 * @host: Host to detach
5634 *
5635 * Detach all ports of @host.
5636 *
5637 * LOCKING:
5638 * Kernel thread context (may sleep).
5639 */
5640 void ata_host_detach(struct ata_host *host)
5641 {
5642 int i;
5643
5644 for (i = 0; i < host->n_ports; i++)
5645 ata_port_detach(host->ports[i]);
5646
5647 /* the host is dead now, dissociate ACPI */
5648 ata_acpi_dissociate(host);
5649 }
5650
5651 #ifdef CONFIG_PCI
5652
5653 /**
5654 * ata_pci_remove_one - PCI layer callback for device removal
5655 * @pdev: PCI device that was removed
5656 *
 *	PCI layer indicates to libata via this hook that a hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
5659 * release is handled via devres.
5660 *
5661 * LOCKING:
5662 * Inherited from PCI layer (may sleep).
5663 */
5664 void ata_pci_remove_one(struct pci_dev *pdev)
5665 {
5666 struct device *dev = &pdev->dev;
5667 struct ata_host *host = dev_get_drvdata(dev);
5668
5669 ata_host_detach(host);
5670 }
5671
5672 /* move to PCI subsystem */
5673 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5674 {
5675 unsigned long tmp = 0;
5676
5677 switch (bits->width) {
5678 case 1: {
5679 u8 tmp8 = 0;
5680 pci_read_config_byte(pdev, bits->reg, &tmp8);
5681 tmp = tmp8;
5682 break;
5683 }
5684 case 2: {
5685 u16 tmp16 = 0;
5686 pci_read_config_word(pdev, bits->reg, &tmp16);
5687 tmp = tmp16;
5688 break;
5689 }
5690 case 4: {
5691 u32 tmp32 = 0;
5692 pci_read_config_dword(pdev, bits->reg, &tmp32);
5693 tmp = tmp32;
5694 break;
5695 }
5696
5697 default:
5698 return -EINVAL;
5699 }
5700
5701 tmp &= bits->mask;
5702
5703 return (tmp == bits->val) ? 1 : 0;
5704 }
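
/*
 * Usage sketch: IDE-class controllers commonly expose an "enable" bit
 * per channel in PCI config space.  A driver checks it roughly like
 * this (register offset and mask below are placeholders):
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width in bytes, mask, val
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[0]))
 *		return -ENOENT;	// channel disabled in hardware
 */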
5705
5706 #ifdef CONFIG_PM
5707 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5708 {
5709 pci_save_state(pdev);
5710 pci_disable_device(pdev);
5711
5712 if (mesg.event & PM_EVENT_SLEEP)
5713 pci_set_power_state(pdev, PCI_D3hot);
5714 }
5715
5716 int ata_pci_device_do_resume(struct pci_dev *pdev)
5717 {
5718 int rc;
5719
5720 pci_set_power_state(pdev, PCI_D0);
5721 pci_restore_state(pdev);
5722
5723 rc = pcim_enable_device(pdev);
5724 if (rc) {
5725 dev_printk(KERN_ERR, &pdev->dev,
5726 "failed to enable device after resume (%d)\n", rc);
5727 return rc;
5728 }
5729
5730 pci_set_master(pdev);
5731 return 0;
5732 }
5733
5734 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5735 {
5736 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5737 int rc = 0;
5738
5739 rc = ata_host_suspend(host, mesg);
5740 if (rc)
5741 return rc;
5742
5743 ata_pci_device_do_suspend(pdev, mesg);
5744
5745 return 0;
5746 }
5747
5748 int ata_pci_device_resume(struct pci_dev *pdev)
5749 {
5750 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5751 int rc;
5752
5753 rc = ata_pci_device_do_resume(pdev);
5754 if (rc == 0)
5755 ata_host_resume(host);
5756 return rc;
5757 }
5758 #endif /* CONFIG_PM */
5759
5760 #endif /* CONFIG_PCI */
5761
5762 static int __init ata_parse_force_one(char **cur,
5763 struct ata_force_ent *force_ent,
5764 const char **reason)
5765 {
5766 /* FIXME: Currently, there's no way to tag init const data and
5767 * using __initdata causes build failure on some versions of
5768 * gcc. Once __initdataconst is implemented, add const to the
5769 * following structure.
5770 */
5771 static struct ata_force_param force_tbl[] __initdata = {
5772 { "40c", .cbl = ATA_CBL_PATA40 },
5773 { "80c", .cbl = ATA_CBL_PATA80 },
5774 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5775 { "unk", .cbl = ATA_CBL_PATA_UNK },
5776 { "ign", .cbl = ATA_CBL_PATA_IGN },
5777 { "sata", .cbl = ATA_CBL_SATA },
5778 { "1.5Gbps", .spd_limit = 1 },
5779 { "3.0Gbps", .spd_limit = 2 },
5780 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5781 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5782 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5783 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5784 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5785 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5786 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5787 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5788 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5789 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5790 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5791 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5792 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5793 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5794 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5795 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5796 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5797 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5798 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5799 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5800 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5801 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5802 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5803 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5804 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5805 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5806 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5807 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5808 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5809 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5810 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5811 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5812 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5813 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5814 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5815 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5816 };
5817 char *start = *cur, *p = *cur;
5818 char *id, *val, *endp;
5819 const struct ata_force_param *match_fp = NULL;
5820 int nr_matches = 0, i;
5821
5822 /* find where this param ends and update *cur */
5823 while (*p != '\0' && *p != ',')
5824 p++;
5825
5826 if (*p == '\0')
5827 *cur = p;
5828 else
5829 *cur = p + 1;
5830
5831 *p = '\0';
5832
5833 /* parse */
5834 p = strchr(start, ':');
5835 if (!p) {
5836 val = strstrip(start);
5837 goto parse_val;
5838 }
5839 *p = '\0';
5840
5841 id = strstrip(start);
5842 val = strstrip(p + 1);
5843
5844 /* parse id */
5845 p = strchr(id, '.');
5846 if (p) {
5847 *p++ = '\0';
5848 force_ent->device = simple_strtoul(p, &endp, 10);
5849 if (p == endp || *endp != '\0') {
5850 *reason = "invalid device";
5851 return -EINVAL;
5852 }
5853 }
5854
5855 force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
5857 *reason = "invalid port/link";
5858 return -EINVAL;
5859 }
5860
5861 parse_val:
5862 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
5863 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
5864 const struct ata_force_param *fp = &force_tbl[i];
5865
5866 if (strncasecmp(val, fp->name, strlen(val)))
5867 continue;
5868
5869 nr_matches++;
5870 match_fp = fp;
5871
5872 if (strcasecmp(val, fp->name) == 0) {
5873 nr_matches = 1;
5874 break;
5875 }
5876 }
5877
5878 if (!nr_matches) {
5879 *reason = "unknown value";
5880 return -EINVAL;
5881 }
5882 if (nr_matches > 1) {
5883 *reason = "ambigious value";
5884 return -EINVAL;
5885 }
5886
5887 force_ent->param = *match_fp;
5888
5889 return 0;
5890 }
5891
5892 static void __init ata_parse_force_param(void)
5893 {
5894 int idx = 0, size = 1;
5895 int last_port = -1, last_device = -1;
5896 char *p, *cur, *next;
5897
5898 /* calculate maximum number of params and allocate force_tbl */
5899 for (p = ata_force_param_buf; *p; p++)
5900 if (*p == ',')
5901 size++;
5902
5903 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
5904 if (!ata_force_tbl) {
5905 printk(KERN_WARNING "ata: failed to extend force table, "
5906 "libata.force ignored\n");
5907 return;
5908 }
5909
5910 /* parse and populate the table */
5911 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
5912 const char *reason = "";
5913 struct ata_force_ent te = { .port = -1, .device = -1 };
5914
5915 next = cur;
5916 if (ata_parse_force_one(&next, &te, &reason)) {
5917 printk(KERN_WARNING "ata: failed to parse force "
5918 "parameter \"%s\" (%s)\n",
5919 cur, reason);
5920 continue;
5921 }
5922
5923 if (te.port == -1) {
5924 te.port = last_port;
5925 te.device = last_device;
5926 }
5927
5928 ata_force_tbl[idx++] = te;
5929
5930 last_port = te.port;
5931 last_device = te.device;
5932 }
5933
5934 ata_force_tbl_size = idx;
5935 }

static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;

	ata_parse_force_param();

	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

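/*
 * Note: subsys_initcall() runs ata_init() ahead of ordinary driver
 * initcalls, so low-level ATA drivers loaded later can assume the
 * library's workqueues and force table are already set up.
 */
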
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
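
/*
 * Illustrative use (not from the original source): ata_ratelimit()
 * returns non-zero at most once per HZ/5 jiffies, i.e. roughly five
 * times a second, so callers gate noisy messages on it:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata: throttled warning\n");
 */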

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of a register to change is a common
 *	operation for ATA controllers.  This function reads the 32-bit
 *	LE IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
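
/*
 * Illustrative use (hypothetical register offset, not from the
 * original source): poll every 10ms, for up to one second, while the
 * BSY bit is still set; the mask/val pair means "keep waiting while
 * (*reg & mask) == val":
 *
 *	status = ata_wait_register(mmio_base + HYPOTHETICAL_STATUS_OFF,
 *				   ATA_BUSY, ATA_BUSY, 10, 1000);
 *	if (status & ATA_BUSY)
 *		;	// timed out, device never went idle
 */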

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
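
/*
 * Note: host drivers typically point an unusable or absent port slot
 * at ata_dummy_port_info; the dummy ops report ATA_DRDY but fail every
 * issued command with AC_ERR_SYSTEM, so the port stays registered
 * without ever performing I/O.
 */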

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_irq_clear);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_error_handler);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);