[libata] irq-pio: Fix merge mistake
drivers/scsi/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev,
66 u16 heads,
67 u16 sectors);
68 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
69 struct ata_device *dev);
70 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
71
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
74
75 int atapi_enabled = 1;
76 module_param(atapi_enabled, int, 0444);
77 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
78
79 int libata_fua = 0;
80 module_param_named(fua, libata_fua, int, 0444);
81 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
82
83 MODULE_AUTHOR("Jeff Garzik");
84 MODULE_DESCRIPTION("Library module for ATA devices");
85 MODULE_LICENSE("GPL");
86 MODULE_VERSION(DRV_VERSION);
87
88
89 /**
90 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
91 * @tf: Taskfile to convert
  92  *	@fis: Buffer into which data will be output
93 * @pmp: Port multiplier port
94 *
95 * Converts a standard ATA taskfile to a Serial ATA
96 * FIS structure (Register - Host to Device).
97 *
98 * LOCKING:
99 * Inherited from caller.
100 */
101
102 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
103 {
104 fis[0] = 0x27; /* Register - Host to Device FIS */
105 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
106 bit 7 indicates Command FIS */
107 fis[2] = tf->command;
108 fis[3] = tf->feature;
109
110 fis[4] = tf->lbal;
111 fis[5] = tf->lbam;
112 fis[6] = tf->lbah;
113 fis[7] = tf->device;
114
115 fis[8] = tf->hob_lbal;
116 fis[9] = tf->hob_lbam;
117 fis[10] = tf->hob_lbah;
118 fis[11] = tf->hob_feature;
119
120 fis[12] = tf->nsect;
121 fis[13] = tf->hob_nsect;
122 fis[14] = 0;
123 fis[15] = tf->ctl;
124
125 fis[16] = 0;
126 fis[17] = 0;
127 fis[18] = 0;
128 fis[19] = 0;
129 }
130
131 /**
132 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
133 * @fis: Buffer from which data will be input
134 * @tf: Taskfile to output
135 *
136 * Converts a serial ATA FIS structure to a standard ATA taskfile.
137 *
138 * LOCKING:
139 * Inherited from caller.
140 */
141
142 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
143 {
144 tf->command = fis[2]; /* status */
145 tf->feature = fis[3]; /* error */
146
147 tf->lbal = fis[4];
148 tf->lbam = fis[5];
149 tf->lbah = fis[6];
150 tf->device = fis[7];
151
152 tf->hob_lbal = fis[8];
153 tf->hob_lbam = fis[9];
154 tf->hob_lbah = fis[10];
155
156 tf->nsect = fis[12];
157 tf->hob_nsect = fis[13];
158 }
159
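
/*
 * Illustrative sketch (not compiled into the driver): the two helpers
 * above are inverses over the fields they both touch.  Assuming a
 * caller-supplied taskfile, a round trip through a Register - Host to
 * Device FIS preserves the LBA, device and sector count values:
 */
#if 0
static void example_fis_roundtrip(const struct ata_taskfile *in)
{
	u8 fis[20];
	struct ata_taskfile out;

	ata_tf_to_fis(in, fis, 0);	/* pmp 0; fis[1] bit 7 marks a command FIS */
	ata_tf_from_fis(fis, &out);	/* out.command = fis[2], etc. */
	/* out.lbal/lbam/lbah, out.device and out.nsect now equal the inputs */
}
#endif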
160 static const u8 ata_rw_cmds[] = {
161 /* pio multi */
162 ATA_CMD_READ_MULTI,
163 ATA_CMD_WRITE_MULTI,
164 ATA_CMD_READ_MULTI_EXT,
165 ATA_CMD_WRITE_MULTI_EXT,
166 0,
167 0,
168 0,
169 ATA_CMD_WRITE_MULTI_FUA_EXT,
170 /* pio */
171 ATA_CMD_PIO_READ,
172 ATA_CMD_PIO_WRITE,
173 ATA_CMD_PIO_READ_EXT,
174 ATA_CMD_PIO_WRITE_EXT,
175 0,
176 0,
177 0,
178 0,
179 /* dma */
180 ATA_CMD_READ,
181 ATA_CMD_WRITE,
182 ATA_CMD_READ_EXT,
183 ATA_CMD_WRITE_EXT,
184 0,
185 0,
186 0,
187 ATA_CMD_WRITE_FUA_EXT
188 };
189
190 /**
191 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
192 * @qc: command to examine and configure
193 *
194 * Examine the device configuration and tf->flags to calculate
195 * the proper read/write commands and protocol to use.
196 *
197 * LOCKING:
198 * caller.
199 */
200 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
201 {
202 struct ata_taskfile *tf = &qc->tf;
203 struct ata_device *dev = qc->dev;
204 u8 cmd;
205
206 int index, fua, lba48, write;
207
208 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
209 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
210 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
211
212 if (dev->flags & ATA_DFLAG_PIO) {
213 tf->protocol = ATA_PROT_PIO;
214 index = dev->multi_count ? 0 : 8;
215 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
216 /* Unable to use DMA due to host limitation */
217 tf->protocol = ATA_PROT_PIO;
218 index = dev->multi_count ? 0 : 8;
219 } else {
220 tf->protocol = ATA_PROT_DMA;
221 index = 16;
222 }
223
224 cmd = ata_rw_cmds[index + fua + lba48 + write];
225 if (cmd) {
226 tf->command = cmd;
227 return 0;
228 }
229 return -1;
230 }
231
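
/*
 * Worked example (illustrative): the lookup above indexes ata_rw_cmds[]
 * as index + fua + lba48 + write.  An LBA48 FUA write on a DMA-capable
 * device computes 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT,
 * while a plain PIO read on a device with multi_count set computes
 * 0 + 0 + 0 + 0, i.e. ATA_CMD_READ_MULTI.
 */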
232 /**
233 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
234 * @pio_mask: pio_mask
235 * @mwdma_mask: mwdma_mask
236 * @udma_mask: udma_mask
237 *
238 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
239 * unsigned int xfer_mask.
240 *
241 * LOCKING:
242 * None.
243 *
244 * RETURNS:
245 * Packed xfer_mask.
246 */
247 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
248 unsigned int mwdma_mask,
249 unsigned int udma_mask)
250 {
251 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
252 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
253 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
254 }
255
256 /**
257 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
258 * @xfer_mask: xfer_mask to unpack
259 * @pio_mask: resulting pio_mask
260 * @mwdma_mask: resulting mwdma_mask
261 * @udma_mask: resulting udma_mask
262 *
263 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 264  *	Any NULL destination masks will be ignored.
265 */
266 static void ata_unpack_xfermask(unsigned int xfer_mask,
267 unsigned int *pio_mask,
268 unsigned int *mwdma_mask,
269 unsigned int *udma_mask)
270 {
271 if (pio_mask)
272 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
273 if (mwdma_mask)
274 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
275 if (udma_mask)
276 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
277 }
278
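
/*
 * Illustrative sketch (not compiled into the driver): pack/unpack are
 * exact inverses, and NULL destination masks are simply skipped:
 */
#if 0
static void example_xfermask_roundtrip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
	ata_unpack_xfermask(xfer_mask, &pio, NULL, NULL);
	/* only *pio_mask is written; the NULLs are ignored */
}
#endif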
279 static const struct ata_xfer_ent {
280 int shift, bits;
281 u8 base;
282 } ata_xfer_tbl[] = {
283 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
284 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
285 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
286 { -1, },
287 };
288
289 /**
290 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
291 * @xfer_mask: xfer_mask of interest
292 *
293 * Return matching XFER_* value for @xfer_mask. Only the highest
294 * bit of @xfer_mask is considered.
295 *
296 * LOCKING:
297 * None.
298 *
299 * RETURNS:
300 * Matching XFER_* value, 0 if no match found.
301 */
302 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
303 {
304 int highbit = fls(xfer_mask) - 1;
305 const struct ata_xfer_ent *ent;
306
307 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
308 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
309 return ent->base + highbit - ent->shift;
310 return 0;
311 }
312
313 /**
314 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
315 * @xfer_mode: XFER_* of interest
316 *
317 * Return matching xfer_mask for @xfer_mode.
318 *
319 * LOCKING:
320 * None.
321 *
322 * RETURNS:
323 * Matching xfer_mask, 0 if no match found.
324 */
325 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
326 {
327 const struct ata_xfer_ent *ent;
328
329 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
330 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
331 return 1 << (ent->shift + xfer_mode - ent->base);
332 return 0;
333 }
334
335 /**
336 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
337 * @xfer_mode: XFER_* of interest
338 *
339 * Return matching xfer_shift for @xfer_mode.
340 *
341 * LOCKING:
342 * None.
343 *
344 * RETURNS:
345 * Matching xfer_shift, -1 if no match found.
346 */
347 static int ata_xfer_mode2shift(unsigned int xfer_mode)
348 {
349 const struct ata_xfer_ent *ent;
350
351 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
352 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
353 return ent->shift;
354 return -1;
355 }
356
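
/*
 * Illustrative sketch (not compiled into the driver): the three lookups
 * above all walk ata_xfer_tbl.  UDMA/66 is UDMA mode 4, so:
 */
#if 0
static void example_xfer_conversions(void)
{
	unsigned int mask;
	int shift;
	u8 mode;

	mode = ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 4));
	/* mode == XFER_UDMA_4 */
	mask = ata_xfer_mode2mask(XFER_UDMA_4);
	/* mask == 1 << (ATA_SHIFT_UDMA + 4) */
	shift = ata_xfer_mode2shift(XFER_UDMA_4);
	/* shift == ATA_SHIFT_UDMA */
}
#endif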
357 /**
358 * ata_mode_string - convert xfer_mask to string
359 * @xfer_mask: mask of bits supported; only highest bit counts.
360 *
361 * Determine string which represents the highest speed
 362  *	(highest bit in @xfer_mask).
363 *
364 * LOCKING:
365 * None.
366 *
367 * RETURNS:
368 * Constant C string representing highest speed listed in
 369  *	@xfer_mask, or the constant C string "<n/a>".
370 */
371 static const char *ata_mode_string(unsigned int xfer_mask)
372 {
373 static const char * const xfer_mode_str[] = {
374 "PIO0",
375 "PIO1",
376 "PIO2",
377 "PIO3",
378 "PIO4",
379 "MWDMA0",
380 "MWDMA1",
381 "MWDMA2",
382 "UDMA/16",
383 "UDMA/25",
384 "UDMA/33",
385 "UDMA/44",
386 "UDMA/66",
387 "UDMA/100",
388 "UDMA/133",
389 "UDMA7",
390 };
391 int highbit;
392
393 highbit = fls(xfer_mask) - 1;
394 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
395 return xfer_mode_str[highbit];
396 return "<n/a>";
397 }
398
399 static const char *sata_spd_string(unsigned int spd)
400 {
401 static const char * const spd_str[] = {
402 "1.5 Gbps",
403 "3.0 Gbps",
404 };
405
406 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
407 return "<unknown>";
408 return spd_str[spd - 1];
409 }
410
411 void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
412 {
413 if (ata_dev_enabled(dev)) {
414 printk(KERN_WARNING "ata%u: dev %u disabled\n",
415 ap->id, dev->devno);
416 dev->class++;
417 }
418 }
419
420 /**
421 * ata_pio_devchk - PATA device presence detection
422 * @ap: ATA channel to examine
423 * @device: Device to examine (starting at zero)
424 *
425 * This technique was originally described in
426 * Hale Landis's ATADRVR (www.ata-atapi.com), and
427 * later found its way into the ATA/ATAPI spec.
428 *
429 * Write a pattern to the ATA shadow registers,
430 * and if a device is present, it will respond by
431 * correctly storing and echoing back the
432 * ATA shadow register contents.
433 *
434 * LOCKING:
435 * caller.
436 */
437
438 static unsigned int ata_pio_devchk(struct ata_port *ap,
439 unsigned int device)
440 {
441 struct ata_ioports *ioaddr = &ap->ioaddr;
442 u8 nsect, lbal;
443
444 ap->ops->dev_select(ap, device);
445
446 outb(0x55, ioaddr->nsect_addr);
447 outb(0xaa, ioaddr->lbal_addr);
448
449 outb(0xaa, ioaddr->nsect_addr);
450 outb(0x55, ioaddr->lbal_addr);
451
452 outb(0x55, ioaddr->nsect_addr);
453 outb(0xaa, ioaddr->lbal_addr);
454
455 nsect = inb(ioaddr->nsect_addr);
456 lbal = inb(ioaddr->lbal_addr);
457
458 if ((nsect == 0x55) && (lbal == 0xaa))
459 return 1; /* we found a device */
460
461 return 0; /* nothing found */
462 }
463
464 /**
465 * ata_mmio_devchk - PATA device presence detection
466 * @ap: ATA channel to examine
467 * @device: Device to examine (starting at zero)
468 *
469 * This technique was originally described in
470 * Hale Landis's ATADRVR (www.ata-atapi.com), and
471 * later found its way into the ATA/ATAPI spec.
472 *
473 * Write a pattern to the ATA shadow registers,
474 * and if a device is present, it will respond by
475 * correctly storing and echoing back the
476 * ATA shadow register contents.
477 *
478 * LOCKING:
479 * caller.
480 */
481
482 static unsigned int ata_mmio_devchk(struct ata_port *ap,
483 unsigned int device)
484 {
485 struct ata_ioports *ioaddr = &ap->ioaddr;
486 u8 nsect, lbal;
487
488 ap->ops->dev_select(ap, device);
489
490 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
491 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
492
493 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
494 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
495
496 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
497 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
498
499 nsect = readb((void __iomem *) ioaddr->nsect_addr);
500 lbal = readb((void __iomem *) ioaddr->lbal_addr);
501
502 if ((nsect == 0x55) && (lbal == 0xaa))
503 return 1; /* we found a device */
504
505 return 0; /* nothing found */
506 }
507
508 /**
509 * ata_devchk - PATA device presence detection
510 * @ap: ATA channel to examine
511 * @device: Device to examine (starting at zero)
512 *
513 * Dispatch ATA device presence detection, depending
514 * on whether we are using PIO or MMIO to talk to the
515 * ATA shadow registers.
516 *
517 * LOCKING:
518 * caller.
519 */
520
521 static unsigned int ata_devchk(struct ata_port *ap,
522 unsigned int device)
523 {
524 if (ap->flags & ATA_FLAG_MMIO)
525 return ata_mmio_devchk(ap, device);
526 return ata_pio_devchk(ap, device);
527 }
528
529 /**
530 * ata_dev_classify - determine device type based on ATA-spec signature
531 * @tf: ATA taskfile register set for device to be identified
532 *
533 * Determine from taskfile register contents whether a device is
534 * ATA or ATAPI, as per "Signature and persistence" section
535 * of ATA/PI spec (volume 1, sect 5.14).
536 *
537 * LOCKING:
538 * None.
539 *
540 * RETURNS:
541 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 542  *	in the event of failure.
543 */
544
545 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
546 {
547 /* Apple's open source Darwin code hints that some devices only
548 * put a proper signature into the LBA mid/high registers,
 549  *	so we check only those.  It's sufficient for uniqueness.
550 */
551
552 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
553 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
554 DPRINTK("found ATA device by sig\n");
555 return ATA_DEV_ATA;
556 }
557
558 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
559 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
560 DPRINTK("found ATAPI device by sig\n");
561 return ATA_DEV_ATAPI;
562 }
563
564 DPRINTK("unknown device\n");
565 return ATA_DEV_UNKNOWN;
566 }
567
568 /**
569 * ata_dev_try_classify - Parse returned ATA device signature
570 * @ap: ATA channel to examine
571 * @device: Device to examine (starting at zero)
572 * @r_err: Value of error register on completion
573 *
574 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
575 * an ATA/ATAPI-defined set of values is placed in the ATA
576 * shadow registers, indicating the results of device detection
577 * and diagnostics.
578 *
579 * Select the ATA device, and read the values from the ATA shadow
580 * registers. Then parse according to the Error register value,
581 * and the spec-defined values examined by ata_dev_classify().
582 *
583 * LOCKING:
584 * caller.
585 *
586 * RETURNS:
587 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
588 */
589
590 static unsigned int
591 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
592 {
593 struct ata_taskfile tf;
594 unsigned int class;
595 u8 err;
596
597 ap->ops->dev_select(ap, device);
598
599 memset(&tf, 0, sizeof(tf));
600
601 ap->ops->tf_read(ap, &tf);
602 err = tf.feature;
603 if (r_err)
604 *r_err = err;
605
606 /* see if device passed diags */
607 if (err == 1)
608 /* do nothing */ ;
609 else if ((device == 0) && (err == 0x81))
610 /* do nothing */ ;
611 else
612 return ATA_DEV_NONE;
613
614 /* determine if device is ATA or ATAPI */
615 class = ata_dev_classify(&tf);
616
617 if (class == ATA_DEV_UNKNOWN)
618 return ATA_DEV_NONE;
619 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
620 return ATA_DEV_NONE;
621 return class;
622 }
623
624 /**
625 * ata_id_string - Convert IDENTIFY DEVICE page into string
626 * @id: IDENTIFY DEVICE results we will examine
627 * @s: string into which data is output
628 * @ofs: offset into identify device page
629 * @len: length of string to return. must be an even number.
630 *
631 * The strings in the IDENTIFY DEVICE page are broken up into
632 * 16-bit chunks. Run through the string, and output each
633 * 8-bit chunk linearly, regardless of platform.
634 *
635 * LOCKING:
636 * caller.
637 */
638
639 void ata_id_string(const u16 *id, unsigned char *s,
640 unsigned int ofs, unsigned int len)
641 {
642 unsigned int c;
643
644 while (len > 0) {
645 c = id[ofs] >> 8;
646 *s = c;
647 s++;
648
649 c = id[ofs] & 0xff;
650 *s = c;
651 s++;
652
653 ofs++;
654 len -= 2;
655 }
656 }
657
658 /**
659 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
660 * @id: IDENTIFY DEVICE results we will examine
661 * @s: string into which data is output
662 * @ofs: offset into identify device page
663 * @len: length of string to return. must be an odd number.
664 *
665 * This function is identical to ata_id_string except that it
666 * trims trailing spaces and terminates the resulting string with
667 * null. @len must be actual maximum length (even number) + 1.
668 *
669 * LOCKING:
670 * caller.
671 */
672 void ata_id_c_string(const u16 *id, unsigned char *s,
673 unsigned int ofs, unsigned int len)
674 {
675 unsigned char *p;
676
677 WARN_ON(!(len & 1));
678
679 ata_id_string(id, s, ofs, len - 1);
680
681 p = s + strnlen(s, len - 1);
682 while (p > s && p[-1] == ' ')
683 p--;
684 *p = '\0';
685 }
686
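
/*
 * Illustrative sketch (not compiled into the driver): per the ATA spec
 * the model string occupies IDENTIFY words 27-46, i.e. 40 bytes:
 */
#if 0
static void example_id_strings(const u16 *id)
{
	unsigned char model[41];	/* 40 chars + terminating NUL */

	/* @len must be odd: actual maximum length (40) + 1 */
	ata_id_c_string(id, model, 27, sizeof(model));
	/* model is now NUL terminated with trailing spaces trimmed */
}
#endif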
687 static u64 ata_id_n_sectors(const u16 *id)
688 {
689 if (ata_id_has_lba(id)) {
690 if (ata_id_has_lba48(id))
691 return ata_id_u64(id, 100);
692 else
693 return ata_id_u32(id, 60);
694 } else {
695 if (ata_id_current_chs_valid(id))
696 return ata_id_u32(id, 57);
697 else
698 return id[1] * id[3] * id[6];
699 }
700 }
701
702 /**
703 * ata_noop_dev_select - Select device 0/1 on ATA bus
704 * @ap: ATA channel to manipulate
705 * @device: ATA device (numbered from zero) to select
706 *
 707  *	This function intentionally performs no action; it is a no-op
 *	device selector.
708 *
709 * May be used as the dev_select() entry in ata_port_operations.
710 *
711 * LOCKING:
712 * caller.
713 */
714 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
715 {
716 }
717
718
719 /**
720 * ata_std_dev_select - Select device 0/1 on ATA bus
721 * @ap: ATA channel to manipulate
722 * @device: ATA device (numbered from zero) to select
723 *
724 * Use the method defined in the ATA specification to
725 * make either device 0, or device 1, active on the
726 * ATA channel. Works with both PIO and MMIO.
727 *
728 * May be used as the dev_select() entry in ata_port_operations.
729 *
730 * LOCKING:
731 * caller.
732 */
733
734 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
735 {
736 u8 tmp;
737
738 if (device == 0)
739 tmp = ATA_DEVICE_OBS;
740 else
741 tmp = ATA_DEVICE_OBS | ATA_DEV1;
742
743 if (ap->flags & ATA_FLAG_MMIO) {
744 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
745 } else {
746 outb(tmp, ap->ioaddr.device_addr);
747 }
748 ata_pause(ap); /* needed; also flushes, for mmio */
749 }
750
751 /**
752 * ata_dev_select - Select device 0/1 on ATA bus
753 * @ap: ATA channel to manipulate
754 * @device: ATA device (numbered from zero) to select
755 * @wait: non-zero to wait for Status register BSY bit to clear
756 * @can_sleep: non-zero if context allows sleeping
757 *
758 * Use the method defined in the ATA specification to
759 * make either device 0, or device 1, active on the
760 * ATA channel.
761 *
762 * This is a high-level version of ata_std_dev_select(),
763 * which additionally provides the services of inserting
764 * the proper pauses and status polling, where needed.
765 *
766 * LOCKING:
767 * caller.
768 */
769
770 void ata_dev_select(struct ata_port *ap, unsigned int device,
771 unsigned int wait, unsigned int can_sleep)
772 {
773 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
774 ap->id, device, wait);
775
776 if (wait)
777 ata_wait_idle(ap);
778
779 ap->ops->dev_select(ap, device);
780
781 if (wait) {
782 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
783 msleep(150);
784 ata_wait_idle(ap);
785 }
786 }
787
788 /**
789 * ata_dump_id - IDENTIFY DEVICE info debugging output
790 * @id: IDENTIFY DEVICE page to dump
791 *
792 * Dump selected 16-bit words from the given IDENTIFY DEVICE
793 * page.
794 *
795 * LOCKING:
796 * caller.
797 */
798
799 static inline void ata_dump_id(const u16 *id)
800 {
801 DPRINTK("49==0x%04x "
802 "53==0x%04x "
803 "63==0x%04x "
804 "64==0x%04x "
805 "75==0x%04x \n",
806 id[49],
807 id[53],
808 id[63],
809 id[64],
810 id[75]);
811 DPRINTK("80==0x%04x "
812 "81==0x%04x "
813 "82==0x%04x "
814 "83==0x%04x "
815 "84==0x%04x \n",
816 id[80],
817 id[81],
818 id[82],
819 id[83],
820 id[84]);
821 DPRINTK("88==0x%04x "
822 "93==0x%04x\n",
823 id[88],
824 id[93]);
825 }
826
827 /**
828 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
829 * @id: IDENTIFY data to compute xfer mask from
830 *
831 * Compute the xfermask for this device. This is not as trivial
832 * as it seems if we must consider early devices correctly.
833 *
 834  *	FIXME: pre-IDE drive timing (do we care?).
835 *
836 * LOCKING:
837 * None.
838 *
839 * RETURNS:
840 * Computed xfermask
841 */
842 static unsigned int ata_id_xfermask(const u16 *id)
843 {
844 unsigned int pio_mask, mwdma_mask, udma_mask;
845
846 /* Usual case. Word 53 indicates word 64 is valid */
847 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
848 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
849 pio_mask <<= 3;
850 pio_mask |= 0x7;
851 } else {
852 /* If word 64 isn't valid then Word 51 high byte holds
853 * the PIO timing number for the maximum. Turn it into
854 * a mask.
855 */
 856  		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;
857
 858  		/* But wait, there's more.  Design your standards by
 859  		 * committee and you too can get a free IORDY field to
 860  		 * process.  However, it's the speeds, not the modes, that
 861  		 * are supported...  Note that drivers using the timing API
 862  		 * will get this right anyway.
863 */
864 }
865
866 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
867
868 udma_mask = 0;
869 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
870 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
871
872 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
873 }
874
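
/*
 * Worked example (illustrative): a device reporting PIO3/4 support sets
 * bits 0-1 of word 64, so pio_mask becomes (0x03 << 3) | 0x7 = 0x1f
 * (PIO0-4).  With word 63 = 0x0007 (MWDMA0-2) and word 88 = 0x3f
 * (UDMA0-5), the packed result advertises PIO0-4, MWDMA0-2 and UDMA0-5.
 */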
875 /**
876 * ata_port_queue_task - Queue port_task
 877  *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value passed to workqueue function
 *	@delay: delay time for workqueue function
878 *
879 * Schedule @fn(@data) for execution after @delay jiffies using
880 * port_task. There is one port_task per port and it's the
 881  *	user's (i.e. the low-level driver's) responsibility to make sure that only
882 * one task is active at any given time.
883 *
884 * libata core layer takes care of synchronization between
885 * port_task and EH. ata_port_queue_task() may be ignored for EH
886 * synchronization.
887 *
888 * LOCKING:
889 * Inherited from caller.
890 */
891 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
892 unsigned long delay)
893 {
894 int rc;
895
896 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
897 return;
898
899 PREPARE_WORK(&ap->port_task, fn, data);
900
901 if (!delay)
902 rc = queue_work(ata_wq, &ap->port_task);
903 else
904 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
905
906 /* rc == 0 means that another user is using port task */
907 WARN_ON(rc == 0);
908 }
909
910 /**
911 * ata_port_flush_task - Flush port_task
912 * @ap: The ata_port to flush port_task for
913 *
 914  *	After this function completes, port_task is guaranteed not to
915 * be running or scheduled.
916 *
917 * LOCKING:
918 * Kernel thread context (may sleep)
919 */
920 void ata_port_flush_task(struct ata_port *ap)
921 {
922 unsigned long flags;
923
924 DPRINTK("ENTER\n");
925
926 spin_lock_irqsave(&ap->host_set->lock, flags);
927 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
928 spin_unlock_irqrestore(&ap->host_set->lock, flags);
929
930 DPRINTK("flush #1\n");
931 flush_workqueue(ata_wq);
932
933 /*
934 * At this point, if a task is running, it's guaranteed to see
935 * the FLUSH flag; thus, it will never queue pio tasks again.
936 * Cancel and flush.
937 */
938 if (!cancel_delayed_work(&ap->port_task)) {
939 DPRINTK("flush #2\n");
940 flush_workqueue(ata_wq);
941 }
942
943 spin_lock_irqsave(&ap->host_set->lock, flags);
944 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
945 spin_unlock_irqrestore(&ap->host_set->lock, flags);
946
947 DPRINTK("EXIT\n");
948 }
949
950 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
951 {
952 struct completion *waiting = qc->private_data;
953
954 qc->ap->ops->tf_read(qc->ap, &qc->tf);
955 complete(waiting);
956 }
957
958 /**
959 * ata_exec_internal - execute libata internal command
960 * @ap: Port to which the command is sent
961 * @dev: Device to which the command is sent
962 * @tf: Taskfile registers for the command and the result
963 * @cdb: CDB for packet command
 964  *	@dma_dir: Data transfer direction of the command
965 * @buf: Data buffer of the command
966 * @buflen: Length of data buffer
967 *
968 * Executes libata internal command with timeout. @tf contains
969 * command on entry and result on return. Timeout and error
970 * conditions are reported via return value. No recovery action
971 * is taken after a command times out. It's caller's duty to
972 * clean up after timeout.
973 *
974 * LOCKING:
975 * None. Should be called with kernel context, might sleep.
 976  *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
977
978 unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
979 struct ata_taskfile *tf, const u8 *cdb,
980 int dma_dir, void *buf, unsigned int buflen)
981 {
982 u8 command = tf->command;
983 struct ata_queued_cmd *qc;
984 DECLARE_COMPLETION(wait);
985 unsigned long flags;
986 unsigned int err_mask;
987
988 spin_lock_irqsave(&ap->host_set->lock, flags);
989
990 qc = ata_qc_new_init(ap, dev);
991 BUG_ON(qc == NULL);
992
993 qc->tf = *tf;
994 if (cdb)
995 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
996 qc->dma_dir = dma_dir;
997 if (dma_dir != DMA_NONE) {
998 ata_sg_init_one(qc, buf, buflen);
999 qc->nsect = buflen / ATA_SECT_SIZE;
1000 }
1001
1002 qc->private_data = &wait;
1003 qc->complete_fn = ata_qc_complete_internal;
1004
1005 ata_qc_issue(qc);
1006
1007 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1008
1009 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
1010 ata_port_flush_task(ap);
1011
1012 spin_lock_irqsave(&ap->host_set->lock, flags);
1013
1014 /* We're racing with irq here. If we lose, the
1015 * following test prevents us from completing the qc
1016 * again. If completion irq occurs after here but
1017 * before the caller cleans up, it will result in a
1018 * spurious interrupt. We can live with that.
1019 */
1020 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1021 qc->err_mask = AC_ERR_TIMEOUT;
1022 ata_qc_complete(qc);
1023 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1024 ap->id, command);
1025 }
1026
1027 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1028 }
1029
1030 *tf = qc->tf;
1031 err_mask = qc->err_mask;
1032
1033 ata_qc_free(qc);
1034
1035 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1036 * Until those drivers are fixed, we detect the condition
1037 * here, fail the command with AC_ERR_SYSTEM and reenable the
1038 * port.
1039 *
1040 * Note that this doesn't change any behavior as internal
1041 * command failure results in disabling the device in the
1042 * higher layer for LLDDs without new reset/EH callbacks.
1043 *
1044 * Kill the following code as soon as those drivers are fixed.
1045 */
1046 if (ap->flags & ATA_FLAG_DISABLED) {
1047 err_mask |= AC_ERR_SYSTEM;
1048 ata_port_probe(ap);
1049 }
1050
1051 return err_mask;
1052 }
1053
1054 /**
1055 * ata_pio_need_iordy - check if iordy needed
1056 * @adev: ATA device
1057 *
1058 * Check if the current speed of the device requires IORDY. Used
1059 * by various controllers for chip configuration.
1060 */
1061
1062 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1063 {
1064 int pio;
1065 int speed = adev->pio_mode - XFER_PIO_0;
1066
1067 if (speed < 2)
1068 return 0;
1069 if (speed > 2)
1070 return 1;
1071
1072  	/* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1073
1074 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1075 pio = adev->id[ATA_ID_EIDE_PIO];
1076  		/* Is the speed faster than the drive allows non-IORDY? */
1077 if (pio) {
1078 /* This is cycle times not frequency - watch the logic! */
1079  			if (pio > 240)	/* PIO2 is 240 ns per cycle */
1080 return 1;
1081 return 0;
1082 }
1083 }
1084 return 0;
1085 }
1086
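
/*
 * Worked example (illustrative): PIO3 and above always need IORDY here,
 * PIO0/1 never do, and PIO2 needs it only when the drive's minimum
 * non-IORDY cycle time (EIDE word 67) is slower than PIO2's 240 ns.
 */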
1087 /**
1088 * ata_dev_read_id - Read ID data from the specified device
1089 * @ap: port on which target device resides
1090 * @dev: target device
1091 * @p_class: pointer to class of the target device (may be changed)
1092 * @post_reset: is this read ID post-reset?
1093 * @p_id: read IDENTIFY page (newly allocated)
1094 *
1095 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1096 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1097 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1098 * for pre-ATA4 drives.
1099 *
1100 * LOCKING:
1101 * Kernel thread context (may sleep)
1102 *
1103 * RETURNS:
1104 * 0 on success, -errno otherwise.
1105 */
1106 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1107 unsigned int *p_class, int post_reset, u16 **p_id)
1108 {
1109 unsigned int class = *p_class;
1110 struct ata_taskfile tf;
1111 unsigned int err_mask = 0;
1112 u16 *id;
1113 const char *reason;
1114 int rc;
1115
1116 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1117
1118 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1119
1120 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1121 if (id == NULL) {
1122 rc = -ENOMEM;
1123 reason = "out of memory";
1124 goto err_out;
1125 }
1126
1127 retry:
1128 ata_tf_init(ap, &tf, dev->devno);
1129
1130 switch (class) {
1131 case ATA_DEV_ATA:
1132 tf.command = ATA_CMD_ID_ATA;
1133 break;
1134 case ATA_DEV_ATAPI:
1135 tf.command = ATA_CMD_ID_ATAPI;
1136 break;
1137 default:
1138 rc = -ENODEV;
1139 reason = "unsupported class";
1140 goto err_out;
1141 }
1142
1143 tf.protocol = ATA_PROT_PIO;
1144
1145 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
1146 id, sizeof(id[0]) * ATA_ID_WORDS);
1147 if (err_mask) {
1148 rc = -EIO;
1149 reason = "I/O error";
1150 goto err_out;
1151 }
1152
1153 swap_buf_le16(id, ATA_ID_WORDS);
1154
1155 /* sanity check */
1156 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1157 rc = -EINVAL;
1158 reason = "device reports illegal type";
1159 goto err_out;
1160 }
1161
1162 if (post_reset && class == ATA_DEV_ATA) {
1163 /*
1164 * The exact sequence expected by certain pre-ATA4 drives is:
1165 * SRST RESET
1166 * IDENTIFY
1167 * INITIALIZE DEVICE PARAMETERS
1168 * anything else..
1169 * Some drives were very specific about that exact sequence.
1170 */
1171 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1172 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
1173 if (err_mask) {
1174 rc = -EIO;
1175 reason = "INIT_DEV_PARAMS failed";
1176 goto err_out;
1177 }
1178
1179 /* current CHS translation info (id[53-58]) might be
1180 * changed. reread the identify device info.
1181 */
1182 post_reset = 0;
1183 goto retry;
1184 }
1185 }
1186
1187 *p_class = class;
1188 *p_id = id;
1189 return 0;
1190
1191 err_out:
1192 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1193 ap->id, dev->devno, reason);
1194 kfree(id);
1195 return rc;
1196 }
1197
1198 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1199 struct ata_device *dev)
1200 {
1201 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1202 }
1203
1204 /**
1205 * ata_dev_configure - Configure the specified ATA/ATAPI device
1206 * @ap: Port on which target device resides
1207 * @dev: Target device to configure
1208 * @print_info: Enable device info printout
1209 *
1210 * Configure @dev according to @dev->id. Generic and low-level
1211 * driver specific fixups are also applied.
1212 *
1213 * LOCKING:
1214 * Kernel thread context (may sleep)
1215 *
1216 * RETURNS:
1217 * 0 on success, -errno otherwise
1218 */
1219 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1220 int print_info)
1221 {
1222 const u16 *id = dev->id;
1223 unsigned int xfer_mask;
1224 int i, rc;
1225
1226 if (!ata_dev_enabled(dev)) {
1227 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1228 ap->id, dev->devno);
1229 return 0;
1230 }
1231
1232 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1233
1234 /* print device capabilities */
1235 if (print_info)
1236 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1237 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1238 ap->id, dev->devno, id[49], id[82], id[83],
1239 id[84], id[85], id[86], id[87], id[88]);
1240
1241 /* initialize to-be-configured parameters */
1242 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1243 dev->max_sectors = 0;
1244 dev->cdb_len = 0;
1245 dev->n_sectors = 0;
1246 dev->cylinders = 0;
1247 dev->heads = 0;
1248 dev->sectors = 0;
1249
1250 /*
1251 * common ATA, ATAPI feature tests
1252 */
1253
1254 /* find max transfer mode; for printk only */
1255 xfer_mask = ata_id_xfermask(id);
1256
1257 ata_dump_id(id);
1258
1259 /* ATA-specific feature tests */
1260 if (dev->class == ATA_DEV_ATA) {
1261 dev->n_sectors = ata_id_n_sectors(id);
1262
1263 if (ata_id_has_lba(id)) {
1264 const char *lba_desc;
1265
1266 lba_desc = "LBA";
1267 dev->flags |= ATA_DFLAG_LBA;
1268 if (ata_id_has_lba48(id)) {
1269 dev->flags |= ATA_DFLAG_LBA48;
1270 lba_desc = "LBA48";
1271 }
1272
1273 /* print device info to dmesg */
1274 if (print_info)
1275 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1276 "max %s, %Lu sectors: %s\n",
1277 ap->id, dev->devno,
1278 ata_id_major_version(id),
1279 ata_mode_string(xfer_mask),
1280 (unsigned long long)dev->n_sectors,
1281 lba_desc);
1282 } else {
1283 /* CHS */
1284
1285 /* Default translation */
1286 dev->cylinders = id[1];
1287 dev->heads = id[3];
1288 dev->sectors = id[6];
1289
1290 if (ata_id_current_chs_valid(id)) {
1291 /* Current CHS translation is valid. */
1292 dev->cylinders = id[54];
1293 dev->heads = id[55];
1294 dev->sectors = id[56];
1295 }
1296
1297 /* print device info to dmesg */
1298 if (print_info)
1299 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1300 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1301 ap->id, dev->devno,
1302 ata_id_major_version(id),
1303 ata_mode_string(xfer_mask),
1304 (unsigned long long)dev->n_sectors,
1305 dev->cylinders, dev->heads, dev->sectors);
1306 }
1307
1308 if (dev->id[59] & 0x100) {
1309 dev->multi_count = dev->id[59] & 0xff;
1310 DPRINTK("ata%u: dev %u multi count %u\n",
1311 ap->id, dev->devno, dev->multi_count);
1312 }
1313
1314 dev->cdb_len = 16;
1315 }
1316
1317 /* ATAPI-specific feature tests */
1318 else if (dev->class == ATA_DEV_ATAPI) {
1319 char *cdb_intr_string = "";
1320
1321 rc = atapi_cdb_len(id);
1322 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1323 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1324 rc = -EINVAL;
1325 goto err_out_nosup;
1326 }
1327 dev->cdb_len = (unsigned int) rc;
1328
1329 if (ata_id_cdb_intr(dev->id)) {
1330 dev->flags |= ATA_DFLAG_CDB_INTR;
1331 cdb_intr_string = ", CDB intr";
1332 }
1333
1334 /* print device info to dmesg */
1335 if (print_info)
1336 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n",
1337 ap->id, dev->devno, ata_mode_string(xfer_mask),
1338 cdb_intr_string);
1339 }
1340
1341 ap->host->max_cmd_len = 0;
1342 for (i = 0; i < ATA_MAX_DEVICES; i++)
1343 ap->host->max_cmd_len = max_t(unsigned int,
1344 ap->host->max_cmd_len,
1345 ap->device[i].cdb_len);
1346
1347 /* limit bridge transfers to udma5, 200 sectors */
1348 if (ata_dev_knobble(ap, dev)) {
1349 if (print_info)
1350 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1351 ap->id, dev->devno);
1352 dev->udma_mask &= ATA_UDMA5;
1353 dev->max_sectors = ATA_MAX_SECTORS;
1354 }
1355
1356 if (ap->ops->dev_config)
1357 ap->ops->dev_config(ap, dev);
1358
1359 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1360 return 0;
1361
1362 err_out_nosup:
1363 DPRINTK("EXIT, err\n");
1364 return rc;
1365 }
1366
1367 /**
1368 * ata_bus_probe - Reset and probe ATA bus
1369 * @ap: Bus to probe
1370 *
1371 * Master ATA bus probing function. Initiates a hardware-dependent
1372 * bus reset, then attempts to identify any devices found on
1373 * the bus.
1374 *
1375 * LOCKING:
1376 * PCI/etc. bus probe sem.
1377 *
1378 * RETURNS:
1379 * Zero on success, negative errno otherwise.
1380 */
1381
1382 static int ata_bus_probe(struct ata_port *ap)
1383 {
1384 unsigned int classes[ATA_MAX_DEVICES];
1385 int tries[ATA_MAX_DEVICES];
1386 int i, rc, down_xfermask;
1387 struct ata_device *dev;
1388
1389 ata_port_probe(ap);
1390
1391 for (i = 0; i < ATA_MAX_DEVICES; i++)
1392 tries[i] = ATA_PROBE_MAX_TRIES;
1393
1394 retry:
1395 down_xfermask = 0;
1396
1397 /* reset and determine device classes */
1398 for (i = 0; i < ATA_MAX_DEVICES; i++)
1399 classes[i] = ATA_DEV_UNKNOWN;
1400
1401 if (ap->ops->probe_reset) {
1402 rc = ap->ops->probe_reset(ap, classes);
1403 if (rc) {
1404  			printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1405 return rc;
1406 }
1407 } else {
1408 ap->ops->phy_reset(ap);
1409
1410 if (!(ap->flags & ATA_FLAG_DISABLED))
1411 for (i = 0; i < ATA_MAX_DEVICES; i++)
1412 classes[i] = ap->device[i].class;
1413
1414 ata_port_probe(ap);
1415 }
1416
1417 for (i = 0; i < ATA_MAX_DEVICES; i++)
1418 if (classes[i] == ATA_DEV_UNKNOWN)
1419 classes[i] = ATA_DEV_NONE;
1420
1421 /* read IDENTIFY page and configure devices */
1422 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1423 dev = &ap->device[i];
1424 dev->class = classes[i];
1425
1426 if (!tries[i]) {
1427 ata_down_xfermask_limit(ap, dev, 1);
1428 ata_dev_disable(ap, dev);
1429 }
1430
1431 if (!ata_dev_enabled(dev))
1432 continue;
1433
1434 kfree(dev->id);
1435 dev->id = NULL;
1436 rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
1437 if (rc)
1438 goto fail;
1439
1440 rc = ata_dev_configure(ap, dev, 1);
1441 if (rc)
1442 goto fail;
1443 }
1444
1445 /* configure transfer mode */
1446 if (ap->ops->set_mode) {
1447 /* FIXME: make ->set_mode handle no device case and
1448 * return error code and failing device on failure as
1449 * ata_set_mode() does.
1450 */
1451 for (i = 0; i < ATA_MAX_DEVICES; i++)
1452 if (ata_dev_enabled(&ap->device[i])) {
1453 ap->ops->set_mode(ap);
1454 break;
1455 }
1456 rc = 0;
1457 } else {
1458 rc = ata_set_mode(ap, &dev);
1459 if (rc) {
1460 down_xfermask = 1;
1461 goto fail;
1462 }
1463 }
1464
1465 for (i = 0; i < ATA_MAX_DEVICES; i++)
1466 if (ata_dev_enabled(&ap->device[i]))
1467 return 0;
1468
1469 /* no device present, disable port */
1470 ata_port_disable(ap);
1471 ap->ops->port_disable(ap);
1472 return -ENODEV;
1473
1474 fail:
1475 switch (rc) {
1476 case -EINVAL:
1477 case -ENODEV:
1478 tries[dev->devno] = 0;
1479 break;
1480 case -EIO:
1481 ata_down_sata_spd_limit(ap);
1482 /* fall through */
1483 default:
1484 tries[dev->devno]--;
1485 if (down_xfermask &&
1486 ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
1487 tries[dev->devno] = 0;
1488 }
1489
1490 goto retry;
1491 }
1492
1493 /**
1494 * ata_port_probe - Mark port as enabled
1495 * @ap: Port for which we indicate enablement
1496 *
1497 * Modify @ap data structure such that the system
1498 * thinks that the entire port is enabled.
1499 *
1500 * LOCKING: host_set lock, or some other form of
1501 * serialization.
1502 */
1503
1504 void ata_port_probe(struct ata_port *ap)
1505 {
1506 ap->flags &= ~ATA_FLAG_DISABLED;
1507 }
1508
1509 /**
1510 * sata_print_link_status - Print SATA link status
1511 * @ap: SATA port to printk link status about
1512 *
1513 * This function prints link speed and status of a SATA link.
1514 *
1515 * LOCKING:
1516 * None.
1517 */
1518 static void sata_print_link_status(struct ata_port *ap)
1519 {
1520 u32 sstatus, tmp;
1521
1522 if (!ap->ops->scr_read)
1523 return;
1524
1525 sstatus = scr_read(ap, SCR_STATUS);
1526
1527 if (sata_dev_present(ap)) {
1528 tmp = (sstatus >> 4) & 0xf;
1529 printk(KERN_INFO "ata%u: SATA link up %s (SStatus %X)\n",
1530 ap->id, sata_spd_string(tmp), sstatus);
1531 } else {
1532 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1533 ap->id, sstatus);
1534 }
1535 }
1536
1537 /**
1538 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1539 * @ap: SATA port associated with target SATA PHY.
1540 *
1541 * This function issues commands to standard SATA Sxxx
1542 * PHY registers, to wake up the phy (and device), and
1543 * clear any reset condition.
1544 *
1545 * LOCKING:
1546 * PCI/etc. bus probe sem.
1547 *
1548 */
1549 void __sata_phy_reset(struct ata_port *ap)
1550 {
1551 u32 sstatus;
1552 unsigned long timeout = jiffies + (HZ * 5);
1553
1554 if (ap->flags & ATA_FLAG_SATA_RESET) {
1555 /* issue phy wake/reset */
1556 scr_write_flush(ap, SCR_CONTROL, 0x301);
1557 /* Couldn't find anything in SATA I/II specs, but
1558 * AHCI-1.1 10.4.2 says at least 1 ms. */
1559 mdelay(1);
1560 }
1561 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1562
1563 /* wait for phy to become ready, if necessary */
1564 do {
1565 msleep(200);
1566 sstatus = scr_read(ap, SCR_STATUS);
1567 if ((sstatus & 0xf) != 1)
1568 break;
1569 } while (time_before(jiffies, timeout));
1570
1571 /* print link status */
1572 sata_print_link_status(ap);
1573
1574 /* TODO: phy layer with polling, timeouts, etc. */
1575 if (sata_dev_present(ap))
1576 ata_port_probe(ap);
1577 else
1578 ata_port_disable(ap);
1579
1580 if (ap->flags & ATA_FLAG_DISABLED)
1581 return;
1582
1583 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1584 ata_port_disable(ap);
1585 return;
1586 }
1587
1588 ap->cbl = ATA_CBL_SATA;
1589 }
1590
1591 /**
1592 * sata_phy_reset - Reset SATA bus.
1593 * @ap: SATA port associated with target SATA PHY.
1594 *
1595 * This function resets the SATA bus, and then probes
1596 * the bus for devices.
1597 *
1598 * LOCKING:
1599 * PCI/etc. bus probe sem.
1600 *
1601 */
1602 void sata_phy_reset(struct ata_port *ap)
1603 {
1604 __sata_phy_reset(ap);
1605 if (ap->flags & ATA_FLAG_DISABLED)
1606 return;
1607 ata_bus_reset(ap);
1608 }
1609
1610 /**
1611 * ata_dev_pair - return other device on cable
1612 * @ap: port
1613 * @adev: device
1614 *
1615  *	Obtain the other device on the same cable; if none is
1616  *	present, NULL is returned.
1617 */
1618
1619 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1620 {
1621 struct ata_device *pair = &ap->device[1 - adev->devno];
1622 if (!ata_dev_enabled(pair))
1623 return NULL;
1624 return pair;
1625 }
1626
1627 /**
1628 * ata_port_disable - Disable port.
1629 * @ap: Port to be disabled.
1630 *
1631 * Modify @ap data structure such that the system
1632 * thinks that the entire port is disabled, and should
1633 * never attempt to probe or communicate with devices
1634 * on this port.
1635 *
1636 * LOCKING: host_set lock, or some other form of
1637 * serialization.
1638 */
1639
1640 void ata_port_disable(struct ata_port *ap)
1641 {
1642 ap->device[0].class = ATA_DEV_NONE;
1643 ap->device[1].class = ATA_DEV_NONE;
1644 ap->flags |= ATA_FLAG_DISABLED;
1645 }
1646
1647 /**
1648 * ata_down_sata_spd_limit - adjust SATA spd limit downward
1649 * @ap: Port to adjust SATA spd limit for
1650 *
1651 * Adjust SATA spd limit of @ap downward. Note that this
1652 * function only adjusts the limit. The change must be applied
1653 * using ata_set_sata_spd().
1654 *
1655 * LOCKING:
1656 * Inherited from caller.
1657 *
1658 * RETURNS:
1659 * 0 on success, negative errno on failure
1660 */
1661 int ata_down_sata_spd_limit(struct ata_port *ap)
1662 {
1663 u32 spd, mask;
1664 int highbit;
1665
1666 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
1667 return -EOPNOTSUPP;
1668
1669 mask = ap->sata_spd_limit;
1670 if (mask <= 1)
1671 return -EINVAL;
1672 highbit = fls(mask) - 1;
1673 mask &= ~(1 << highbit);
1674
1675 spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf;
1676 if (spd <= 1)
1677 return -EINVAL;
1678 spd--;
1679 mask &= (1 << spd) - 1;
1680 if (!mask)
1681 return -EINVAL;
1682
1683 ap->sata_spd_limit = mask;
1684
1685 printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n",
1686 ap->id, sata_spd_string(fls(mask)));
1687
1688 return 0;
1689 }
1690
1691 static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
1692 {
1693 u32 spd, limit;
1694
1695 if (ap->sata_spd_limit == UINT_MAX)
1696 limit = 0;
1697 else
1698 limit = fls(ap->sata_spd_limit);
1699
1700 spd = (*scontrol >> 4) & 0xf;
1701 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1702
1703 return spd != limit;
1704 }
1705
1706 /**
1707 * ata_set_sata_spd_needed - is SATA spd configuration needed
1708 * @ap: Port in question
1709 *
1710 * Test whether the spd limit in SControl matches
1711 * @ap->sata_spd_limit. This function is used to determine
1712 * whether hardreset is necessary to apply SATA spd
1713 * configuration.
1714 *
1715 * LOCKING:
1716 * Inherited from caller.
1717 *
1718 * RETURNS:
1719 * 1 if SATA spd configuration is needed, 0 otherwise.
1720 */
1721 int ata_set_sata_spd_needed(struct ata_port *ap)
1722 {
1723 u32 scontrol;
1724
1725 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
1726 return 0;
1727
1728 scontrol = scr_read(ap, SCR_CONTROL);
1729
1730 return __ata_set_sata_spd_needed(ap, &scontrol);
1731 }
1732
1733 /**
1734 * ata_set_sata_spd - set SATA spd according to spd limit
1735 * @ap: Port to set SATA spd for
1736 *
1737 * Set SATA spd of @ap according to sata_spd_limit.
1738 *
1739 * LOCKING:
1740 * Inherited from caller.
1741 *
1742 * RETURNS:
1743 * 0 if spd doesn't need to be changed, 1 if spd has been
1744 * changed. -EOPNOTSUPP if SCR registers are inaccessible.
1745 */
1746 static int ata_set_sata_spd(struct ata_port *ap)
1747 {
1748 u32 scontrol;
1749
1750 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
1751 return -EOPNOTSUPP;
1752
1753 scontrol = scr_read(ap, SCR_CONTROL);
1754 if (!__ata_set_sata_spd_needed(ap, &scontrol))
1755 return 0;
1756
1757 scr_write(ap, SCR_CONTROL, scontrol);
1758 return 1;
1759 }
1760
1761 /*
1762 * This mode timing computation functionality is ported over from
1763 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1764 */
1765 /*
1766 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1767 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1768 * for PIO 5, which is a nonstandard extension and UDMA6, which
1769 * is currently supported only by Maxtor drives.
1770 */
1771
1772 static const struct ata_timing ata_timing[] = {
1773
1774 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1775 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1776 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1777 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1778
1779 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1780 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1781 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1782
1783 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1784
1785 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1786 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1787 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1788
1789 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1790 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1791 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1792
1793 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1794 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1795 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1796
1797 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1798 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1799 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1800
1801 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1802
1803 { 0xFF }
1804 };
1805
1806 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1807 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1808
1809 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1810 {
1811 q->setup = EZ(t->setup * 1000, T);
1812 q->act8b = EZ(t->act8b * 1000, T);
1813 q->rec8b = EZ(t->rec8b * 1000, T);
1814 q->cyc8b = EZ(t->cyc8b * 1000, T);
1815 q->active = EZ(t->active * 1000, T);
1816 q->recover = EZ(t->recover * 1000, T);
1817 q->cycle = EZ(t->cycle * 1000, T);
1818 q->udma = EZ(t->udma * 1000, UT);
1819 }
1820
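
/*
 * Worked example (illustrative): ENOUGH() rounds a duration up to a
 * whole number of clock periods and EZ() additionally maps 0 ("no
 * requirement") to 0.  Timings are stored in ns and scaled by 1000
 * above, so with a 33 MHz bus clock (T = 30000 ps) PIO4's 120 ns cycle
 * quantizes to ENOUGH(120000, 30000) = (120000 - 1) / 30000 + 1 = 4
 * clocks.
 */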
1821 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1822 struct ata_timing *m, unsigned int what)
1823 {
1824 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1825 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1826 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1827 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1828 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1829 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1830 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1831 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1832 }
1833
1834 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1835 {
1836 const struct ata_timing *t;
1837
1838 for (t = ata_timing; t->mode != speed; t++)
1839 if (t->mode == 0xFF)
1840 return NULL;
1841 return t;
1842 }
1843
1844 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1845 struct ata_timing *t, int T, int UT)
1846 {
1847 const struct ata_timing *s;
1848 struct ata_timing p;
1849
1850 /*
1851 * Find the mode.
1852 */
1853
1854 if (!(s = ata_timing_find_mode(speed)))
1855 return -EINVAL;
1856
1857 memcpy(t, s, sizeof(*s));
1858
1859 /*
1860 * If the drive is an EIDE drive, it can tell us it needs extended
1861 * PIO/MW_DMA cycle timing.
1862 */
1863
1864 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1865 memset(&p, 0, sizeof(p));
1866 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1867 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1868 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1869 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1870 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1871 }
1872 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1873 }
1874
1875 /*
1876 * Convert the timing to bus clock counts.
1877 */
1878
1879 ata_timing_quantize(t, t, T, UT);
1880
1881 /*
1882 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1883  	 * S.M.A.R.T. and some other commands.  We have to ensure that the
1884  	 * DMA cycle timing is no faster than the fastest PIO timing.
1885 */
1886
1887 if (speed > XFER_PIO_4) {
1888 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1889 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1890 }
1891
1892 /*
1893 * Lengthen active & recovery time so that cycle time is correct.
1894 */
1895
1896 if (t->act8b + t->rec8b < t->cyc8b) {
1897 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1898 t->rec8b = t->cyc8b - t->act8b;
1899 }
1900
1901 if (t->active + t->recover < t->cycle) {
1902 t->active += (t->cycle - (t->active + t->recover)) / 2;
1903 t->recover = t->cycle - t->active;
1904 }
1905
1906 return 0;
1907 }
1908
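
/*
 * Usage sketch (illustrative; the 33 MHz clock is an assumption): a
 * PATA controller driver would typically call this from its
 * ->set_piomode() hook and program the resulting clock counts into
 * its timing registers:
 */
#if 0
static void example_program_pio(struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* clock period in ps, ~30000 */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) == 0) {
		/* t.setup, t.active, t.recover, t.cycle are now in clocks */
	}
}
#endif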
1909 /**
1910 * ata_down_xfermask_limit - adjust dev xfer masks downward
1911 * @ap: Port associated with device @dev
1912 * @dev: Device to adjust xfer masks
1913 * @force_pio0: Force PIO0
1914 *
1915 * Adjust xfer masks of @dev downward. Note that this function
1916 * does not apply the change. Invoking ata_set_mode() afterwards
1917 * will apply the limit.
1918 *
1919 * LOCKING:
1920 * Inherited from caller.
1921 *
1922 * RETURNS:
1923 * 0 on success, negative errno on failure
1924 */
1925 int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
1926 int force_pio0)
1927 {
1928 unsigned long xfer_mask;
1929 int highbit;
1930
1931 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
1932 dev->udma_mask);
1933
1934 if (!xfer_mask)
1935 goto fail;
1936 /* don't gear down to MWDMA from UDMA, go directly to PIO */
1937 if (xfer_mask & ATA_MASK_UDMA)
1938 xfer_mask &= ~ATA_MASK_MWDMA;
1939
1940 highbit = fls(xfer_mask) - 1;
1941 xfer_mask &= ~(1 << highbit);
1942 if (force_pio0)
1943 xfer_mask &= 1 << ATA_SHIFT_PIO;
1944 if (!xfer_mask)
1945 goto fail;
1946
1947 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
1948 &dev->udma_mask);
1949
1950 printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n",
1951 ap->id, dev->devno, ata_mode_string(xfer_mask));
1952
1953 return 0;
1954
1955 fail:
1956 return -EINVAL;
1957 }
1958
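
/*
 * Worked example (illustrative): for a device currently allowing up to
 * UDMA5, the first call clears the MWDMA bits (no gearing down from
 * UDMA to MWDMA) and drops the top UDMA bit; successive calls step
 * UDMA5 -> UDMA4 -> ... -> UDMA0 -> PIO4 -> ... -> PIO0, after which
 * the next call returns -EINVAL.
 */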
1959 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1960 {
1961 unsigned int err_mask;
1962 int rc;
1963
1964 dev->flags &= ~ATA_DFLAG_PIO;
1965 if (dev->xfer_shift == ATA_SHIFT_PIO)
1966 dev->flags |= ATA_DFLAG_PIO;
1967
1968 err_mask = ata_dev_set_xfermode(ap, dev);
1969 if (err_mask) {
1970 printk(KERN_ERR
1971 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1972 ap->id, err_mask);
1973 return -EIO;
1974 }
1975
1976 rc = ata_dev_revalidate(ap, dev, 0);
1977 if (rc)
1978 return rc;
1979
1980 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1981 dev->xfer_shift, (int)dev->xfer_mode);
1982
1983 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1984 ap->id, dev->devno,
1985 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1986 return 0;
1987 }
1988
1989 /**
1990 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1991 * @ap: port on which timings will be programmed
1992  *	@r_failed_dev: out parameter for failed device
1993 *
1994 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
1995 * ata_set_mode() fails, pointer to the failing device is
1996 * returned in @r_failed_dev.
1997 *
1998 * LOCKING:
1999 * PCI/etc. bus probe sem.
2000 *
2001 * RETURNS:
2002 * 0 on success, negative errno otherwise
2003 */
2004 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2005 {
2006 struct ata_device *dev;
2007 int i, rc = 0, used_dma = 0, found = 0;
2008
2009 /* step 1: calculate xfer_mask */
2010 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2011 unsigned int pio_mask, dma_mask;
2012
2013 dev = &ap->device[i];
2014
2015 if (!ata_dev_enabled(dev))
2016 continue;
2017
2018 ata_dev_xfermask(ap, dev);
2019
2020 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2021 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2022 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2023 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2024
2025 found = 1;
2026 if (dev->dma_mode)
2027 used_dma = 1;
2028 }
2029 if (!found)
2030 goto out;
2031
2032 /* step 2: always set host PIO timings */
2033 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2034 dev = &ap->device[i];
2035 if (!ata_dev_enabled(dev))
2036 continue;
2037
2038 if (!dev->pio_mode) {
2039 printk(KERN_WARNING "ata%u: dev %u no PIO support\n",
2040 ap->id, dev->devno);
2041 rc = -EINVAL;
2042 goto out;
2043 }
2044
2045 dev->xfer_mode = dev->pio_mode;
2046 dev->xfer_shift = ATA_SHIFT_PIO;
2047 if (ap->ops->set_piomode)
2048 ap->ops->set_piomode(ap, dev);
2049 }
2050
2051 /* step 3: set host DMA timings */
2052 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2053 dev = &ap->device[i];
2054
2055 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2056 continue;
2057
2058 dev->xfer_mode = dev->dma_mode;
2059 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2060 if (ap->ops->set_dmamode)
2061 ap->ops->set_dmamode(ap, dev);
2062 }
2063
2064 /* step 4: update devices' xfer mode */
2065 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2066 dev = &ap->device[i];
2067
2068 if (!ata_dev_enabled(dev))
2069 continue;
2070
2071 rc = ata_dev_set_mode(ap, dev);
2072 if (rc)
2073 goto out;
2074 }
2075
2076 /* Record simplex status. If we selected DMA then the other
2077 * host channels are not permitted to do so.
2078 */
2079 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2080 ap->host_set->simplex_claimed = 1;
2081
2082 	/* step 5: chip specific finalisation */
2083 if (ap->ops->post_set_mode)
2084 ap->ops->post_set_mode(ap);
2085
2086 out:
2087 if (rc)
2088 *r_failed_dev = dev;
2089 return rc;
2090 }
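
/* Illustrative sketch (hypothetical helper, not used by the driver):
 * step 1 above relies on the xfer_mask encoding, where the PIO, MWDMA
 * and UDMA mode bits occupy disjoint fields (ATA_SHIFT_PIO,
 * ATA_SHIFT_MWDMA, ATA_SHIFT_UDMA) of one packed value.  A minimal
 * round trip through the pack/unpack helpers:
 */
static void example_xfermask_roundtrip(struct ata_device *dev)
{
	unsigned int pio, mwdma, udma;
	unsigned long xfer_mask;

	/* one bit per supported mode, packed into disjoint fields */
	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	/* unpacking recovers the original per-type masks */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
}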
2091
2092 /**
2093 * ata_tf_to_host - issue ATA taskfile to host controller
2094 * @ap: port to which command is being issued
2095 * @tf: ATA taskfile register set
2096 *
2097 * Issues ATA taskfile register set to ATA host controller,
2098 * with proper synchronization with interrupt handler and
2099 * other threads.
2100 *
2101 * LOCKING:
2102 * spin_lock_irqsave(host_set lock)
2103 */
2104
2105 static inline void ata_tf_to_host(struct ata_port *ap,
2106 const struct ata_taskfile *tf)
2107 {
2108 ap->ops->tf_load(ap, tf);
2109 ap->ops->exec_command(ap, tf);
2110 }
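
/* Illustrative sketch (hypothetical): issuing a simple NODATA taskfile
 * through ata_tf_to_host().  Per the LOCKING note above, the host_set
 * lock must be held around the call; the command chosen here is only
 * an example.
 */
static void example_issue_nodata(struct ata_port *ap, struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned long flags;

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_CHK_POWER;	/* CHECK POWER MODE */
	tf.protocol = ATA_PROT_NODATA;
	tf.flags |= ATA_TFLAG_DEVICE;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}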
2111
2112 /**
2113 * ata_busy_sleep - sleep until BSY clears, or timeout
2114 * @ap: port containing status register to be polled
2115 * @tmout_pat: impatience timeout
2116 * @tmout: overall timeout
2117 *
2118 * Sleep until ATA Status register bit BSY clears,
2119 * or a timeout occurs.
2120 *
2121 * LOCKING: None.
2122 */
2123
2124 unsigned int ata_busy_sleep (struct ata_port *ap,
2125 unsigned long tmout_pat, unsigned long tmout)
2126 {
2127 unsigned long timer_start, timeout;
2128 u8 status;
2129
2130 status = ata_busy_wait(ap, ATA_BUSY, 300);
2131 timer_start = jiffies;
2132 timeout = timer_start + tmout_pat;
2133 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2134 msleep(50);
2135 status = ata_busy_wait(ap, ATA_BUSY, 3);
2136 }
2137
2138 if (status & ATA_BUSY)
2139 printk(KERN_WARNING "ata%u is slow to respond, "
2140 "please be patient\n", ap->id);
2141
2142 timeout = timer_start + tmout;
2143 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2144 msleep(50);
2145 status = ata_chk_status(ap);
2146 }
2147
2148 if (status & ATA_BUSY) {
2149 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
2150 ap->id, tmout / HZ);
2151 return 1;
2152 }
2153
2154 return 0;
2155 }
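
/* Illustrative sketch (hypothetical wrapper): typical use of
 * ata_busy_sleep(), as in the reset paths below -- the first timeout
 * only triggers the "slow to respond" warning, the second gives up
 * for good.
 */
static int example_wait_not_busy(struct ata_port *ap)
{
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
		return -EBUSY;	/* BSY never cleared */
	return 0;
}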
2156
2157 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2158 {
2159 struct ata_ioports *ioaddr = &ap->ioaddr;
2160 unsigned int dev0 = devmask & (1 << 0);
2161 unsigned int dev1 = devmask & (1 << 1);
2162 unsigned long timeout;
2163
2164 /* if device 0 was found in ata_devchk, wait for its
2165 * BSY bit to clear
2166 */
2167 if (dev0)
2168 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2169
2170 /* if device 1 was found in ata_devchk, wait for
2171 * register access, then wait for BSY to clear
2172 */
2173 timeout = jiffies + ATA_TMOUT_BOOT;
2174 while (dev1) {
2175 u8 nsect, lbal;
2176
2177 ap->ops->dev_select(ap, 1);
2178 if (ap->flags & ATA_FLAG_MMIO) {
2179 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2180 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2181 } else {
2182 nsect = inb(ioaddr->nsect_addr);
2183 lbal = inb(ioaddr->lbal_addr);
2184 }
2185 if ((nsect == 1) && (lbal == 1))
2186 break;
2187 if (time_after(jiffies, timeout)) {
2188 dev1 = 0;
2189 break;
2190 }
2191 msleep(50); /* give drive a breather */
2192 }
2193 if (dev1)
2194 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2195
2196 /* is all this really necessary? */
2197 ap->ops->dev_select(ap, 0);
2198 if (dev1)
2199 ap->ops->dev_select(ap, 1);
2200 if (dev0)
2201 ap->ops->dev_select(ap, 0);
2202 }
2203
2204 static unsigned int ata_bus_softreset(struct ata_port *ap,
2205 unsigned int devmask)
2206 {
2207 struct ata_ioports *ioaddr = &ap->ioaddr;
2208
2209 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2210
2211 /* software reset. causes dev0 to be selected */
2212 if (ap->flags & ATA_FLAG_MMIO) {
2213 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2214 udelay(20); /* FIXME: flush */
2215 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2216 udelay(20); /* FIXME: flush */
2217 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2218 } else {
2219 outb(ap->ctl, ioaddr->ctl_addr);
2220 udelay(10);
2221 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2222 udelay(10);
2223 outb(ap->ctl, ioaddr->ctl_addr);
2224 }
2225
2226 /* spec mandates ">= 2ms" before checking status.
2227 * We wait 150ms, because that was the magic delay used for
2228 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2229 * between when the ATA command register is written, and then
2230 * status is checked. Because waiting for "a while" before
2231 * checking status is fine, post SRST, we perform this magic
2232 * delay here as well.
2233 *
2234 	 * Old drivers/ide uses the 2ms rule and then waits for ready
2235 */
2236 msleep(150);
2237
2238 /* Before we perform post reset processing we want to see if
2239 * the bus shows 0xFF because the odd clown forgets the D7
2240 * pulldown resistor.
2241 */
2242 if (ata_check_status(ap) == 0xFF)
2243 return AC_ERR_OTHER;
2244
2245 ata_bus_post_reset(ap, devmask);
2246
2247 return 0;
2248 }
2249
2250 /**
2251 * ata_bus_reset - reset host port and associated ATA channel
2252 * @ap: port to reset
2253 *
2254 * This is typically the first time we actually start issuing
2255 * commands to the ATA channel. We wait for BSY to clear, then
2256 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2257 * result. Determine what devices, if any, are on the channel
2258 * by looking at the device 0/1 error register. Look at the signature
2259 * stored in each device's taskfile registers, to determine if
2260 * the device is ATA or ATAPI.
2261 *
2262 * LOCKING:
2263 * PCI/etc. bus probe sem.
2264 * Obtains host_set lock.
2265 *
2266 * SIDE EFFECTS:
2267 * Sets ATA_FLAG_DISABLED if bus reset fails.
2268 */
2269
2270 void ata_bus_reset(struct ata_port *ap)
2271 {
2272 struct ata_ioports *ioaddr = &ap->ioaddr;
2273 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2274 u8 err;
2275 unsigned int dev0, dev1 = 0, devmask = 0;
2276
2277 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2278
2279 /* determine if device 0/1 are present */
2280 if (ap->flags & ATA_FLAG_SATA_RESET)
2281 dev0 = 1;
2282 else {
2283 dev0 = ata_devchk(ap, 0);
2284 if (slave_possible)
2285 dev1 = ata_devchk(ap, 1);
2286 }
2287
2288 if (dev0)
2289 devmask |= (1 << 0);
2290 if (dev1)
2291 devmask |= (1 << 1);
2292
2293 /* select device 0 again */
2294 ap->ops->dev_select(ap, 0);
2295
2296 /* issue bus reset */
2297 if (ap->flags & ATA_FLAG_SRST)
2298 if (ata_bus_softreset(ap, devmask))
2299 goto err_out;
2300
2301 /*
2302 * determine by signature whether we have ATA or ATAPI devices
2303 */
2304 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2305 if ((slave_possible) && (err != 0x81))
2306 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2307
2308 /* re-enable interrupts */
2309 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2310 ata_irq_on(ap);
2311
2312 /* is double-select really necessary? */
2313 if (ap->device[1].class != ATA_DEV_NONE)
2314 ap->ops->dev_select(ap, 1);
2315 if (ap->device[0].class != ATA_DEV_NONE)
2316 ap->ops->dev_select(ap, 0);
2317
2318 /* if no devices were detected, disable this port */
2319 if ((ap->device[0].class == ATA_DEV_NONE) &&
2320 (ap->device[1].class == ATA_DEV_NONE))
2321 goto err_out;
2322
2323 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2324 /* set up device control for ATA_FLAG_SATA_RESET */
2325 if (ap->flags & ATA_FLAG_MMIO)
2326 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2327 else
2328 outb(ap->ctl, ioaddr->ctl_addr);
2329 }
2330
2331 DPRINTK("EXIT\n");
2332 return;
2333
2334 err_out:
2335 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2336 ap->ops->port_disable(ap);
2337
2338 DPRINTK("EXIT\n");
2339 }
2340
2341 static int sata_phy_resume(struct ata_port *ap)
2342 {
2343 unsigned long timeout = jiffies + (HZ * 5);
2344 u32 scontrol, sstatus;
2345
2346 scontrol = scr_read(ap, SCR_CONTROL);
2347 scontrol = (scontrol & 0x0f0) | 0x300;
2348 scr_write_flush(ap, SCR_CONTROL, scontrol);
2349
2350 /* Wait for phy to become ready, if necessary. */
2351 do {
2352 msleep(200);
2353 sstatus = scr_read(ap, SCR_STATUS);
2354 if ((sstatus & 0xf) != 1)
2355 return 0;
2356 } while (time_before(jiffies, timeout));
2357
2358 return -1;
2359 }
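
/* Illustrative sketch (hypothetical helper): the low nibble of SStatus
 * is the DET field.  DET == 0x1 means a device was detected but phy
 * communication is not yet established, DET == 0x3 means it is, which
 * is why the loop above keeps waiting while DET == 1.  This mirrors
 * what sata_dev_present() checks:
 */
static int example_link_up(struct ata_port *ap)
{
	u32 sstatus = scr_read(ap, SCR_STATUS);

	return (sstatus & 0xf) == 0x3;
}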
2360
2361 /**
2362 * ata_std_probeinit - initialize probing
2363 * @ap: port to be probed
2364 *
2365 * @ap is about to be probed. Initialize it. This function is
2366 * to be used as standard callback for ata_drive_probe_reset().
2367 *
2368 * NOTE!!! Do not use this function as probeinit if a low level
2369 * driver implements only hardreset. Just pass NULL as probeinit
2370 * in that case. Using this function is probably okay but doing
2371 * so makes reset sequence different from the original
2372 * ->phy_reset implementation and Jeff nervous. :-P
2373 */
2374 void ata_std_probeinit(struct ata_port *ap)
2375 {
2376 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2377 u32 spd;
2378
2379 sata_phy_resume(ap);
2380
2381 spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4;
2382 if (spd)
2383 ap->sata_spd_limit &= (1 << spd) - 1;
2384
2385 if (sata_dev_present(ap))
2386 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2387 }
2388 }
2389
2390 /**
2391 * ata_std_softreset - reset host port via ATA SRST
2392 * @ap: port to reset
2393 * @verbose: fail verbosely
2394 * @classes: resulting classes of attached devices
2395 *
2396 * Reset host port using ATA SRST. This function is to be used
2397 * as standard callback for ata_drive_*_reset() functions.
2398 *
2399 * LOCKING:
2400 * Kernel thread context (may sleep)
2401 *
2402 * RETURNS:
2403 * 0 on success, -errno otherwise.
2404 */
2405 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2406 {
2407 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2408 unsigned int devmask = 0, err_mask;
2409 u8 err;
2410
2411 DPRINTK("ENTER\n");
2412
2413 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2414 classes[0] = ATA_DEV_NONE;
2415 goto out;
2416 }
2417
2418 /* determine if device 0/1 are present */
2419 if (ata_devchk(ap, 0))
2420 devmask |= (1 << 0);
2421 if (slave_possible && ata_devchk(ap, 1))
2422 devmask |= (1 << 1);
2423
2424 /* select device 0 again */
2425 ap->ops->dev_select(ap, 0);
2426
2427 /* issue bus reset */
2428 DPRINTK("about to softreset, devmask=%x\n", devmask);
2429 err_mask = ata_bus_softreset(ap, devmask);
2430 if (err_mask) {
2431 if (verbose)
2432 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2433 ap->id, err_mask);
2434 else
2435 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2436 err_mask);
2437 return -EIO;
2438 }
2439
2440 /* determine by signature whether we have ATA or ATAPI devices */
2441 classes[0] = ata_dev_try_classify(ap, 0, &err);
2442 if (slave_possible && err != 0x81)
2443 classes[1] = ata_dev_try_classify(ap, 1, &err);
2444
2445 out:
2446 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2447 return 0;
2448 }
2449
2450 /**
2451 * sata_std_hardreset - reset host port via SATA phy reset
2452 * @ap: port to reset
2453 * @verbose: fail verbosely
2454 * @class: resulting class of attached device
2455 *
2456 * SATA phy-reset host port using DET bits of SControl register.
2457 * This function is to be used as standard callback for
2458 * ata_drive_*_reset().
2459 *
2460 * LOCKING:
2461 * Kernel thread context (may sleep)
2462 *
2463 * RETURNS:
2464 * 0 on success, -errno otherwise.
2465 */
2466 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2467 {
2468 u32 scontrol;
2469
2470 DPRINTK("ENTER\n");
2471
2472 if (ata_set_sata_spd_needed(ap)) {
2473 /* SATA spec says nothing about how to reconfigure
2474 * spd. To be on the safe side, turn off phy during
2475 * reconfiguration. This works for at least ICH7 AHCI
2476 * and Sil3124.
2477 */
2478 scontrol = scr_read(ap, SCR_CONTROL);
2479 scontrol = (scontrol & 0x0f0) | 0x302;
2480 scr_write_flush(ap, SCR_CONTROL, scontrol);
2481
2482 ata_set_sata_spd(ap);
2483 }
2484
2485 /* issue phy wake/reset */
2486 scontrol = scr_read(ap, SCR_CONTROL);
2487 scontrol = (scontrol & 0x0f0) | 0x301;
2488 scr_write_flush(ap, SCR_CONTROL, scontrol);
2489
2490 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2491 * 10.4.2 says at least 1 ms.
2492 */
2493 msleep(1);
2494
2495 /* bring phy back */
2496 sata_phy_resume(ap);
2497
2498 /* TODO: phy layer with polling, timeouts, etc. */
2499 if (!sata_dev_present(ap)) {
2500 *class = ATA_DEV_NONE;
2501 DPRINTK("EXIT, link offline\n");
2502 return 0;
2503 }
2504
2505 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2506 if (verbose)
2507 printk(KERN_ERR "ata%u: COMRESET failed "
2508 "(device not ready)\n", ap->id);
2509 else
2510 DPRINTK("EXIT, device not ready\n");
2511 return -EIO;
2512 }
2513
2514 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2515
2516 *class = ata_dev_try_classify(ap, 0, NULL);
2517
2518 DPRINTK("EXIT, class=%u\n", *class);
2519 return 0;
2520 }
2521
2522 /**
2523 * ata_std_postreset - standard postreset callback
2524 * @ap: the target ata_port
2525 * @classes: classes of attached devices
2526 *
2527 * This function is invoked after a successful reset. Note that
2528 * the device might have been reset more than once using
2529 * different reset methods before postreset is invoked.
2530 *
2531 * This function is to be used as standard callback for
2532 * ata_drive_*_reset().
2533 *
2534 * LOCKING:
2535 * Kernel thread context (may sleep)
2536 */
2537 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2538 {
2539 DPRINTK("ENTER\n");
2540
2541 /* set cable type if it isn't already set */
2542 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2543 ap->cbl = ATA_CBL_SATA;
2544
2545 /* print link status */
2546 if (ap->cbl == ATA_CBL_SATA)
2547 sata_print_link_status(ap);
2548
2549 /* re-enable interrupts */
2550 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2551 ata_irq_on(ap);
2552
2553 /* is double-select really necessary? */
2554 if (classes[0] != ATA_DEV_NONE)
2555 ap->ops->dev_select(ap, 1);
2556 if (classes[1] != ATA_DEV_NONE)
2557 ap->ops->dev_select(ap, 0);
2558
2559 /* bail out if no device is present */
2560 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2561 DPRINTK("EXIT, no device\n");
2562 return;
2563 }
2564
2565 /* set up device control */
2566 if (ap->ioaddr.ctl_addr) {
2567 if (ap->flags & ATA_FLAG_MMIO)
2568 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2569 else
2570 outb(ap->ctl, ap->ioaddr.ctl_addr);
2571 }
2572
2573 DPRINTK("EXIT\n");
2574 }
2575
2576 /**
2577 * ata_std_probe_reset - standard probe reset method
2578  *	@ap: port to perform probe-reset on
2579 * @classes: resulting classes of attached devices
2580 *
2581 * The stock off-the-shelf ->probe_reset method.
2582 *
2583 * LOCKING:
2584 * Kernel thread context (may sleep)
2585 *
2586 * RETURNS:
2587 * 0 on success, -errno otherwise.
2588 */
2589 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2590 {
2591 ata_reset_fn_t hardreset;
2592
2593 hardreset = NULL;
2594 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2595 hardreset = sata_std_hardreset;
2596
2597 return ata_drive_probe_reset(ap, ata_std_probeinit,
2598 ata_std_softreset, hardreset,
2599 ata_std_postreset, classes);
2600 }
2601
2602 int ata_do_reset(struct ata_port *ap,
2603 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
2604 int verbose, unsigned int *classes)
2605 {
2606 int i, rc;
2607
2608 for (i = 0; i < ATA_MAX_DEVICES; i++)
2609 classes[i] = ATA_DEV_UNKNOWN;
2610
2611 rc = reset(ap, verbose, classes);
2612 if (rc)
2613 return rc;
2614
2615 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2616 * is complete and convert all ATA_DEV_UNKNOWN to
2617 * ATA_DEV_NONE.
2618 */
2619 for (i = 0; i < ATA_MAX_DEVICES; i++)
2620 if (classes[i] != ATA_DEV_UNKNOWN)
2621 break;
2622
2623 if (i < ATA_MAX_DEVICES)
2624 for (i = 0; i < ATA_MAX_DEVICES; i++)
2625 if (classes[i] == ATA_DEV_UNKNOWN)
2626 classes[i] = ATA_DEV_NONE;
2627
2628 if (postreset)
2629 postreset(ap, classes);
2630
2631 return 0;
2632 }
2633
2634 /**
2635 * ata_drive_probe_reset - Perform probe reset with given methods
2636 * @ap: port to reset
2637 * @probeinit: probeinit method (can be NULL)
2638 * @softreset: softreset method (can be NULL)
2639 * @hardreset: hardreset method (can be NULL)
2640 * @postreset: postreset method (can be NULL)
2641 * @classes: resulting classes of attached devices
2642 *
2643 * Reset the specified port and classify attached devices using
2644 * given methods. This function prefers softreset but tries all
2645 * possible reset sequences to reset and classify devices. This
2646 * function is intended to be used for constructing ->probe_reset
2647 * callback by low level drivers.
2648 *
2649 * Reset methods should follow the following rules.
2650 *
2651  *	- Return 0 on success, -errno on failure.
2652 * - If classification is supported, fill classes[] with
2653 * recognized class codes.
2654 * - If classification is not supported, leave classes[] alone.
2655 * - If verbose is non-zero, print error message on failure;
2656 * otherwise, shut up.
2657 *
2658 * LOCKING:
2659 * Kernel thread context (may sleep)
2660 *
2661 * RETURNS:
2662  *	0 on success, -EINVAL if no reset method is available, -ENODEV
2663 * if classification fails, and any error code from reset
2664 * methods.
2665 */
2666 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2667 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2668 ata_postreset_fn_t postreset, unsigned int *classes)
2669 {
2670 int rc = -EINVAL;
2671
2672 if (probeinit)
2673 probeinit(ap);
2674
2675 if (softreset && !ata_set_sata_spd_needed(ap)) {
2676 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2677 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2678 goto done;
2679 printk(KERN_INFO "ata%u: softreset failed, will try "
2680 "hardreset in 5 secs\n", ap->id);
2681 ssleep(5);
2682 }
2683
2684 if (!hardreset)
2685 goto done;
2686
2687 while (1) {
2688 rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
2689 if (rc == 0) {
2690 if (classes[0] != ATA_DEV_UNKNOWN)
2691 goto done;
2692 break;
2693 }
2694
2695 if (ata_down_sata_spd_limit(ap))
2696 goto done;
2697
2698 printk(KERN_INFO "ata%u: hardreset failed, will retry "
2699 "in 5 secs\n", ap->id);
2700 ssleep(5);
2701 }
2702
2703 if (softreset) {
2704 printk(KERN_INFO "ata%u: hardreset succeeded without "
2705 "classification, will retry softreset in 5 secs\n",
2706 ap->id);
2707 ssleep(5);
2708
2709 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2710 }
2711
2712 done:
2713 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
2714 rc = -ENODEV;
2715 return rc;
2716 }
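
/* Illustrative sketch (hypothetical LLDD callback): composing a
 * ->probe_reset from these building blocks.  This PATA-style variant
 * passes NULL for probeinit and hardreset; compare
 * ata_std_probe_reset() above, which adds sata_std_hardreset when SCR
 * access is available.
 */
static int example_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	return ata_drive_probe_reset(ap, NULL,
				     ata_std_softreset, NULL,
				     ata_std_postreset, classes);
}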
2717
2718 /**
2719 * ata_dev_same_device - Determine whether new ID matches configured device
2720 * @ap: port on which the device to compare against resides
2721 * @dev: device to compare against
2722 * @new_class: class of the new device
2723 * @new_id: IDENTIFY page of the new device
2724 *
2725 * Compare @new_class and @new_id against @dev and determine
2726 * whether @dev is the device indicated by @new_class and
2727 * @new_id.
2728 *
2729 * LOCKING:
2730 * None.
2731 *
2732 * RETURNS:
2733 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2734 */
2735 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2736 unsigned int new_class, const u16 *new_id)
2737 {
2738 const u16 *old_id = dev->id;
2739 unsigned char model[2][41], serial[2][21];
2740 u64 new_n_sectors;
2741
2742 if (dev->class != new_class) {
2743 printk(KERN_INFO
2744 "ata%u: dev %u class mismatch %d != %d\n",
2745 ap->id, dev->devno, dev->class, new_class);
2746 return 0;
2747 }
2748
2749 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2750 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2751 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2752 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2753 new_n_sectors = ata_id_n_sectors(new_id);
2754
2755 if (strcmp(model[0], model[1])) {
2756 printk(KERN_INFO
2757 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2758 ap->id, dev->devno, model[0], model[1]);
2759 return 0;
2760 }
2761
2762 if (strcmp(serial[0], serial[1])) {
2763 printk(KERN_INFO
2764 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2765 ap->id, dev->devno, serial[0], serial[1]);
2766 return 0;
2767 }
2768
2769 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2770 printk(KERN_INFO
2771 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2772 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2773 (unsigned long long)new_n_sectors);
2774 return 0;
2775 }
2776
2777 return 1;
2778 }
2779
2780 /**
2781 * ata_dev_revalidate - Revalidate ATA device
2782 * @ap: port on which the device to revalidate resides
2783 * @dev: device to revalidate
2784 * @post_reset: is this revalidation after reset?
2785 *
2786 * Re-read IDENTIFY page and make sure @dev is still attached to
2787 * the port.
2788 *
2789 * LOCKING:
2790 * Kernel thread context (may sleep)
2791 *
2792 * RETURNS:
2793 * 0 on success, negative errno otherwise
2794 */
2795 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2796 int post_reset)
2797 {
2798 unsigned int class = dev->class;
2799 u16 *id = NULL;
2800 int rc;
2801
2802 if (!ata_dev_enabled(dev)) {
2803 rc = -ENODEV;
2804 goto fail;
2805 }
2806
2807 /* allocate & read ID data */
2808 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2809 if (rc)
2810 goto fail;
2811
2812 /* is the device still there? */
2813 if (!ata_dev_same_device(ap, dev, class, id)) {
2814 rc = -ENODEV;
2815 goto fail;
2816 }
2817
2818 kfree(dev->id);
2819 dev->id = id;
2820
2821 /* configure device according to the new ID */
2822 rc = ata_dev_configure(ap, dev, 0);
2823 if (rc == 0)
2824 return 0;
2825
2826 fail:
2827 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2828 ap->id, dev->devno, rc);
2829 kfree(id);
2830 return rc;
2831 }
2832
2833 static const char * const ata_dma_blacklist [] = {
2834 "WDC AC11000H", NULL,
2835 "WDC AC22100H", NULL,
2836 "WDC AC32500H", NULL,
2837 "WDC AC33100H", NULL,
2838 "WDC AC31600H", NULL,
2839 "WDC AC32100H", "24.09P07",
2840 "WDC AC23200L", "21.10N21",
2841 "Compaq CRD-8241B", NULL,
2842 "CRD-8400B", NULL,
2843 "CRD-8480B", NULL,
2844 "CRD-8482B", NULL,
2845 "CRD-84", NULL,
2846 "SanDisk SDP3B", NULL,
2847 "SanDisk SDP3B-64", NULL,
2848 "SANYO CD-ROM CRD", NULL,
2849 "HITACHI CDR-8", NULL,
2850 "HITACHI CDR-8335", NULL,
2851 "HITACHI CDR-8435", NULL,
2852 "Toshiba CD-ROM XM-6202B", NULL,
2853 "TOSHIBA CD-ROM XM-1702BC", NULL,
2854 "CD-532E-A", NULL,
2855 "E-IDE CD-ROM CR-840", NULL,
2856 "CD-ROM Drive/F5A", NULL,
2857 "WPI CDD-820", NULL,
2858 "SAMSUNG CD-ROM SC-148C", NULL,
2859 "SAMSUNG CD-ROM SC", NULL,
2860 "SanDisk SDP3B-64", NULL,
2861 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2862 "_NEC DV5800A", NULL,
2863 "SAMSUNG CD-ROM SN-124", "N001"
2864 };
2865
2866 static int ata_strim(char *s, size_t len)
2867 {
2868 len = strnlen(s, len);
2869
2870 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2871 while ((len > 0) && (s[len - 1] == ' ')) {
2872 len--;
2873 s[len] = 0;
2874 }
2875 return len;
2876 }
2877
2878 static int ata_dma_blacklisted(const struct ata_device *dev)
2879 {
2880 unsigned char model_num[40];
2881 unsigned char model_rev[16];
2882 unsigned int nlen, rlen;
2883 int i;
2884
2885 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2886 sizeof(model_num));
2887 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2888 sizeof(model_rev));
2889 nlen = ata_strim(model_num, sizeof(model_num));
2890 rlen = ata_strim(model_rev, sizeof(model_rev));
2891
2892 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2893 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2894 if (ata_dma_blacklist[i+1] == NULL)
2895 return 1;
2896 			if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2897 return 1;
2898 }
2899 }
2900 return 0;
2901 }
2902
2903 /**
2904 * ata_dev_xfermask - Compute supported xfermask of the given device
2905 * @ap: Port on which the device to compute xfermask for resides
2906 * @dev: Device to compute xfermask for
2907 *
2908 * Compute supported xfermask of @dev and store it in
2909 * dev->*_mask. This function is responsible for applying all
2910 * known limits including host controller limits, device
2911 * blacklist, etc...
2912 *
2913 * FIXME: The current implementation limits all transfer modes to
2914  *	the fastest of the slowest device on the port.  This is not
2915 * required on most controllers.
2916 *
2917 * LOCKING:
2918 * None.
2919 */
2920 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2921 {
2922 struct ata_host_set *hs = ap->host_set;
2923 unsigned long xfer_mask;
2924 int i;
2925
2926 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2927 ap->mwdma_mask, ap->udma_mask);
2928
2929 /* Apply cable rule here. Don't apply it early because when
2930 * we handle hot plug the cable type can itself change.
2931 */
2932 if (ap->cbl == ATA_CBL_PATA40)
2933 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2934
2935 /* FIXME: Use port-wide xfermask for now */
2936 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2937 struct ata_device *d = &ap->device[i];
2938
2939 if (ata_dev_absent(d))
2940 continue;
2941
2942 if (ata_dev_disabled(d)) {
2943 /* to avoid violating device selection timing */
2944 xfer_mask &= ata_pack_xfermask(d->pio_mask,
2945 UINT_MAX, UINT_MAX);
2946 continue;
2947 }
2948
2949 xfer_mask &= ata_pack_xfermask(d->pio_mask,
2950 d->mwdma_mask, d->udma_mask);
2951 xfer_mask &= ata_id_xfermask(d->id);
2952 if (ata_dma_blacklisted(d))
2953 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2954 }
2955
2956 if (ata_dma_blacklisted(dev))
2957 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2958 "disabling DMA\n", ap->id, dev->devno);
2959
2960 if (hs->flags & ATA_HOST_SIMPLEX) {
2961 if (hs->simplex_claimed)
2962 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2963 }
2964
2965 if (ap->ops->mode_filter)
2966 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2967
2968 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
2969 &dev->mwdma_mask, &dev->udma_mask);
2970 }
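
/* Illustrative sketch (hypothetical helper): the cable rule above in
 * isolation.  0xF8 covers bits 3-7 of the UDMA field, i.e. UDMA3 and
 * up, so a 40-wire cable is clamped to UDMA2 (ATA/33) while the PIO
 * and MWDMA fields pass through untouched.
 */
static unsigned long example_apply_40wire_rule(unsigned long xfer_mask)
{
	return xfer_mask & ~(0xF8UL << ATA_SHIFT_UDMA);
}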
2971
2972 /**
2973 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2974 * @ap: Port associated with device @dev
2975 * @dev: Device to which command will be sent
2976 *
2977 * Issue SET FEATURES - XFER MODE command to device @dev
2978 * on port @ap.
2979 *
2980 * LOCKING:
2981 * PCI/etc. bus probe sem.
2982 *
2983 * RETURNS:
2984 * 0 on success, AC_ERR_* mask otherwise.
2985 */
2986
2987 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2988 struct ata_device *dev)
2989 {
2990 struct ata_taskfile tf;
2991 unsigned int err_mask;
2992
2993 /* set up set-features taskfile */
2994 DPRINTK("set features - xfer mode\n");
2995
2996 ata_tf_init(ap, &tf, dev->devno);
2997 tf.command = ATA_CMD_SET_FEATURES;
2998 tf.feature = SETFEATURES_XFER;
2999 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3000 tf.protocol = ATA_PROT_NODATA;
3001 tf.nsect = dev->xfer_mode;
3002
3003 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
3004
3005 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3006 return err_mask;
3007 }
3008
3009 /**
3010 * ata_dev_init_params - Issue INIT DEV PARAMS command
3011 * @ap: Port associated with device @dev
3012 * @dev: Device to which command will be sent
3013 *
3014 * LOCKING:
3015 * Kernel thread context (may sleep)
3016 *
3017 * RETURNS:
3018 * 0 on success, AC_ERR_* mask otherwise.
3019 */
3020
3021 static unsigned int ata_dev_init_params(struct ata_port *ap,
3022 struct ata_device *dev,
3023 u16 heads,
3024 u16 sectors)
3025 {
3026 struct ata_taskfile tf;
3027 unsigned int err_mask;
3028
3029 /* Number of sectors per track 1-255. Number of heads 1-16 */
3030 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3031 return AC_ERR_INVALID;
3032
3033 /* set up init dev params taskfile */
3034 	DPRINTK("init dev params\n");
3035
3036 ata_tf_init(ap, &tf, dev->devno);
3037 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3038 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3039 tf.protocol = ATA_PROT_NODATA;
3040 tf.nsect = sectors;
3041 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3042
3043 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
3044
3045 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3046 return err_mask;
3047 }
3048
3049 /**
3050 * ata_sg_clean - Unmap DMA memory associated with command
3051 * @qc: Command containing DMA memory to be released
3052 *
3053 * Unmap all mapped DMA memory associated with this command.
3054 *
3055 * LOCKING:
3056 * spin_lock_irqsave(host_set lock)
3057 */
3058
3059 static void ata_sg_clean(struct ata_queued_cmd *qc)
3060 {
3061 struct ata_port *ap = qc->ap;
3062 struct scatterlist *sg = qc->__sg;
3063 int dir = qc->dma_dir;
3064 void *pad_buf = NULL;
3065
3066 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3067 WARN_ON(sg == NULL);
3068
3069 if (qc->flags & ATA_QCFLAG_SINGLE)
3070 WARN_ON(qc->n_elem > 1);
3071
3072 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3073
3074 /* if we padded the buffer out to 32-bit bound, and data
3075 * xfer direction is from-device, we must copy from the
3076 * pad buffer back into the supplied buffer
3077 */
3078 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3079 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3080
3081 if (qc->flags & ATA_QCFLAG_SG) {
3082 if (qc->n_elem)
3083 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3084 /* restore last sg */
3085 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3086 if (pad_buf) {
3087 struct scatterlist *psg = &qc->pad_sgent;
3088 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3089 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3090 kunmap_atomic(addr, KM_IRQ0);
3091 }
3092 } else {
3093 if (qc->n_elem)
3094 dma_unmap_single(ap->dev,
3095 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3096 dir);
3097 /* restore sg */
3098 sg->length += qc->pad_len;
3099 if (pad_buf)
3100 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3101 pad_buf, qc->pad_len);
3102 }
3103
3104 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3105 qc->__sg = NULL;
3106 }
3107
3108 /**
3109 * ata_fill_sg - Fill PCI IDE PRD table
3110 * @qc: Metadata associated with taskfile to be transferred
3111 *
3112 * Fill PCI IDE PRD (scatter-gather) table with segments
3113 * associated with the current disk command.
3114 *
3115 * LOCKING:
3116 * spin_lock_irqsave(host_set lock)
3117 *
3118 */
3119 static void ata_fill_sg(struct ata_queued_cmd *qc)
3120 {
3121 struct ata_port *ap = qc->ap;
3122 struct scatterlist *sg;
3123 unsigned int idx;
3124
3125 WARN_ON(qc->__sg == NULL);
3126 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3127
3128 idx = 0;
3129 ata_for_each_sg(sg, qc) {
3130 u32 addr, offset;
3131 u32 sg_len, len;
3132
3133 /* determine if physical DMA addr spans 64K boundary.
3134 * Note h/w doesn't support 64-bit, so we unconditionally
3135 * truncate dma_addr_t to u32.
3136 */
3137 addr = (u32) sg_dma_address(sg);
3138 sg_len = sg_dma_len(sg);
3139
3140 while (sg_len) {
3141 offset = addr & 0xffff;
3142 len = sg_len;
3143 if ((offset + sg_len) > 0x10000)
3144 len = 0x10000 - offset;
3145
3146 ap->prd[idx].addr = cpu_to_le32(addr);
3147 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3148 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3149
3150 idx++;
3151 sg_len -= len;
3152 addr += len;
3153 }
3154 }
3155
3156 if (idx)
3157 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3158 }
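
/* Illustrative sketch (hypothetical helper): the 64K boundary rule
 * above, worked through.  A segment at bus address 0x1fff0 with length
 * 0x20 straddles a 64K line and becomes two PRD entries,
 * (0x1fff0, 0x10) and (0x20000, 0x10).  The helper below only counts
 * the entries a segment would need, using the same arithmetic.
 */
static unsigned int example_prd_entries(u32 addr, u32 sg_len)
{
	unsigned int n = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;
		n++;
		addr += len;
		sg_len -= len;
	}
	return n;
}
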
3159 /**
3160 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3161 * @qc: Metadata associated with taskfile to check
3162 *
3163 * Allow low-level driver to filter ATA PACKET commands, returning
3164 * a status indicating whether or not it is OK to use DMA for the
3165 * supplied PACKET command.
3166 *
3167 * LOCKING:
3168 * spin_lock_irqsave(host_set lock)
3169 *
3170 * RETURNS: 0 when ATAPI DMA can be used
3171 * nonzero otherwise
3172 */
3173 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3174 {
3175 struct ata_port *ap = qc->ap;
3176 int rc = 0; /* Assume ATAPI DMA is OK by default */
3177
3178 if (ap->ops->check_atapi_dma)
3179 rc = ap->ops->check_atapi_dma(qc);
3180
3181 /* We don't support polling DMA.
3182 * Use PIO if the LLDD handles only interrupts in
3183 * the HSM_ST_LAST state and the ATAPI device
3184 * generates CDB interrupts.
3185 */
3186 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3187 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3188 rc = 1;
3189
3190 return rc;
3191 }

3192 /**
3193 * ata_qc_prep - Prepare taskfile for submission
3194 * @qc: Metadata associated with taskfile to be prepared
3195 *
3196 * Prepare ATA taskfile for submission.
3197 *
3198 * LOCKING:
3199 * spin_lock_irqsave(host_set lock)
3200 */
3201 void ata_qc_prep(struct ata_queued_cmd *qc)
3202 {
3203 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3204 return;
3205
3206 ata_fill_sg(qc);
3207 }
3208
3209 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3210
3211 /**
3212 * ata_sg_init_one - Associate command with memory buffer
3213 * @qc: Command to be associated
3214 * @buf: Memory buffer
3215 * @buflen: Length of memory buffer, in bytes.
3216 *
3217 * Initialize the data-related elements of queued_cmd @qc
3218 * to point to a single memory buffer, @buf of byte length @buflen.
3219 *
3220 * LOCKING:
3221 * spin_lock_irqsave(host_set lock)
3222 */
3223
3224 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3225 {
3226 struct scatterlist *sg;
3227
3228 qc->flags |= ATA_QCFLAG_SINGLE;
3229
3230 memset(&qc->sgent, 0, sizeof(qc->sgent));
3231 qc->__sg = &qc->sgent;
3232 qc->n_elem = 1;
3233 qc->orig_n_elem = 1;
3234 qc->buf_virt = buf;
3235
3236 sg = qc->__sg;
3237 sg_init_one(sg, buf, buflen);
3238 }
3239
3240 /**
3241 * ata_sg_init - Associate command with scatter-gather table.
3242 * @qc: Command to be associated
3243 * @sg: Scatter-gather table.
3244 * @n_elem: Number of elements in s/g table.
3245 *
3246 * Initialize the data-related elements of queued_cmd @qc
3247 * to point to a scatter-gather table @sg, containing @n_elem
3248 * elements.
3249 *
3250 * LOCKING:
3251 * spin_lock_irqsave(host_set lock)
3252 */
3253
3254 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3255 unsigned int n_elem)
3256 {
3257 qc->flags |= ATA_QCFLAG_SG;
3258 qc->__sg = sg;
3259 qc->n_elem = n_elem;
3260 qc->orig_n_elem = n_elem;
3261 }
3262
3263 /**
3264 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3265 * @qc: Command with memory buffer to be mapped.
3266 *
3267 * DMA-map the memory buffer associated with queued_cmd @qc.
3268 *
3269 * LOCKING:
3270 * spin_lock_irqsave(host_set lock)
3271 *
3272 * RETURNS:
3273 * Zero on success, negative on error.
3274 */
3275
3276 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3277 {
3278 struct ata_port *ap = qc->ap;
3279 int dir = qc->dma_dir;
3280 struct scatterlist *sg = qc->__sg;
3281 dma_addr_t dma_address;
3282 int trim_sg = 0;
3283
3284 /* we must lengthen transfers to end on a 32-bit boundary */
3285 qc->pad_len = sg->length & 3;
3286 if (qc->pad_len) {
3287 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3288 struct scatterlist *psg = &qc->pad_sgent;
3289
3290 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3291
3292 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3293
3294 if (qc->tf.flags & ATA_TFLAG_WRITE)
3295 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3296 qc->pad_len);
3297
3298 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3299 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3300 /* trim sg */
3301 sg->length -= qc->pad_len;
3302 if (sg->length == 0)
3303 trim_sg = 1;
3304
3305 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3306 sg->length, qc->pad_len);
3307 }
3308
3309 if (trim_sg) {
3310 qc->n_elem--;
3311 goto skip_map;
3312 }
3313
3314 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3315 sg->length, dir);
3316 if (dma_mapping_error(dma_address)) {
3317 /* restore sg */
3318 sg->length += qc->pad_len;
3319 return -1;
3320 }
3321
3322 sg_dma_address(sg) = dma_address;
3323 sg_dma_len(sg) = sg->length;
3324
3325 skip_map:
3326 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3327 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3328
3329 return 0;
3330 }
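
/* Illustrative sketch (hypothetical helper): the padding arithmetic
 * above with concrete numbers.  For a 510-byte ATAPI transfer,
 * pad_len = 510 & 3 = 2: the sg entry is trimmed to 508 bytes for the
 * main mapping and the last 2 bytes travel via the per-tag pad buffer,
 * so the controller only ever sees 32-bit multiples.
 */
static void example_pad_split(unsigned int buflen,
			      unsigned int *main_len, unsigned int *pad_len)
{
	*pad_len = buflen & 3;		/* 510 -> 2 */
	*main_len = buflen - *pad_len;	/* 510 -> 508 */
}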
3331
3332 /**
3333 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3334 * @qc: Command with scatter-gather table to be mapped.
3335 *
3336 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3337 *
3338 * LOCKING:
3339 * spin_lock_irqsave(host_set lock)
3340 *
3341 * RETURNS:
3342 * Zero on success, negative on error.
3343 *
3344 */
3345
3346 static int ata_sg_setup(struct ata_queued_cmd *qc)
3347 {
3348 struct ata_port *ap = qc->ap;
3349 struct scatterlist *sg = qc->__sg;
3350 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3351 int n_elem, pre_n_elem, dir, trim_sg = 0;
3352
3353 VPRINTK("ENTER, ata%u\n", ap->id);
3354 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3355
3356 /* we must lengthen transfers to end on a 32-bit boundary */
3357 qc->pad_len = lsg->length & 3;
3358 if (qc->pad_len) {
3359 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3360 struct scatterlist *psg = &qc->pad_sgent;
3361 unsigned int offset;
3362
3363 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3364
3365 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3366
3367 /*
3368 * psg->page/offset are used to copy to-be-written
3369 * data in this function or read data in ata_sg_clean.
3370 */
3371 offset = lsg->offset + lsg->length - qc->pad_len;
3372 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3373 psg->offset = offset_in_page(offset);
3374
3375 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3376 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3377 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3378 kunmap_atomic(addr, KM_IRQ0);
3379 }
3380
3381 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3382 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3383 /* trim last sg */
3384 lsg->length -= qc->pad_len;
3385 if (lsg->length == 0)
3386 trim_sg = 1;
3387
3388 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3389 qc->n_elem - 1, lsg->length, qc->pad_len);
3390 }
3391
3392 pre_n_elem = qc->n_elem;
3393 if (trim_sg && pre_n_elem)
3394 pre_n_elem--;
3395
3396 if (!pre_n_elem) {
3397 n_elem = 0;
3398 goto skip_map;
3399 }
3400
3401 dir = qc->dma_dir;
3402 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3403 if (n_elem < 1) {
3404 /* restore last sg */
3405 lsg->length += qc->pad_len;
3406 return -1;
3407 }
3408
3409 DPRINTK("%d sg elements mapped\n", n_elem);
3410
3411 skip_map:
3412 qc->n_elem = n_elem;
3413
3414 return 0;
3415 }
3416
3417 /**
3418 * ata_poll_qc_complete - turn irq back on and finish qc
3419 * @qc: Command to complete
3421 *
3422 * LOCKING:
3423 * None. (grabs host lock)
3424 */
3425
3426 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3427 {
3428 struct ata_port *ap = qc->ap;
3429 unsigned long flags;
3430
3431 spin_lock_irqsave(&ap->host_set->lock, flags);
3432 ata_irq_on(ap);
3433 ata_qc_complete(qc);
3434 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3435 }
3436
3437 /**
3438 * swap_buf_le16 - swap halves of 16-bit words in place
3439 * @buf: Buffer to swap
3440 * @buf_words: Number of 16-bit words in buffer.
3441 *
3442 * Swap halves of 16-bit words if needed to convert from
3443 * little-endian byte order to native cpu byte order, or
3444 * vice-versa.
3445 *
3446 * LOCKING:
3447 * Inherited from caller.
3448 */
3449 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3450 {
3451 #ifdef __BIG_ENDIAN
3452 unsigned int i;
3453
3454 for (i = 0; i < buf_words; i++)
3455 buf[i] = le16_to_cpu(buf[i]);
3456 #endif /* __BIG_ENDIAN */
3457 }
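
/* Illustrative sketch (hypothetical wrapper): the classic user of this
 * helper is IDENTIFY data, which arrives as little-endian words and
 * must be converted before the ata_id_*() accessors are applied, as
 * ata_dev_read_id() does earlier in this file.
 */
static void example_fixup_id(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}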
3458
3459 /**
3460 * ata_mmio_data_xfer - Transfer data by MMIO
3461 * @ap: port to read/write
3462 * @buf: data buffer
3463 * @buflen: buffer length
3464 * @write_data: read/write
3465 *
3466 * Transfer data from/to the device data register by MMIO.
3467 *
3468 * LOCKING:
3469 * Inherited from caller.
3470 */
3471
3472 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3473 unsigned int buflen, int write_data)
3474 {
3475 unsigned int i;
3476 unsigned int words = buflen >> 1;
3477 u16 *buf16 = (u16 *) buf;
3478 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3479
3480 /* Transfer multiple of 2 bytes */
3481 if (write_data) {
3482 for (i = 0; i < words; i++)
3483 writew(le16_to_cpu(buf16[i]), mmio);
3484 } else {
3485 for (i = 0; i < words; i++)
3486 buf16[i] = cpu_to_le16(readw(mmio));
3487 }
3488
3489 /* Transfer trailing 1 byte, if any. */
3490 if (unlikely(buflen & 0x01)) {
3491 u16 align_buf[1] = { 0 };
3492 unsigned char *trailing_buf = buf + buflen - 1;
3493
3494 if (write_data) {
3495 memcpy(align_buf, trailing_buf, 1);
3496 writew(le16_to_cpu(align_buf[0]), mmio);
3497 } else {
3498 align_buf[0] = cpu_to_le16(readw(mmio));
3499 memcpy(trailing_buf, align_buf, 1);
3500 }
3501 }
3502 }
3503
3504 /**
3505 * ata_pio_data_xfer - Transfer data by PIO
3506 * @ap: port to read/write
3507 * @buf: data buffer
3508 * @buflen: buffer length
3509 * @write_data: read/write
3510 *
3511 * Transfer data from/to the device data register by PIO.
3512 *
3513 * LOCKING:
3514 * Inherited from caller.
3515 */
3516
3517 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3518 unsigned int buflen, int write_data)
3519 {
3520 unsigned int words = buflen >> 1;
3521
3522 /* Transfer multiple of 2 bytes */
3523 if (write_data)
3524 outsw(ap->ioaddr.data_addr, buf, words);
3525 else
3526 insw(ap->ioaddr.data_addr, buf, words);
3527
3528 /* Transfer trailing 1 byte, if any. */
3529 if (unlikely(buflen & 0x01)) {
3530 u16 align_buf[1] = { 0 };
3531 unsigned char *trailing_buf = buf + buflen - 1;
3532
3533 if (write_data) {
3534 memcpy(align_buf, trailing_buf, 1);
3535 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3536 } else {
3537 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3538 memcpy(trailing_buf, align_buf, 1);
3539 }
3540 }
3541 }
3542
3543 /**
3544 * ata_data_xfer - Transfer data from/to the data register.
3545 * @ap: port to read/write
3546 * @buf: data buffer
3547 * @buflen: buffer length
3548 * @do_write: read/write
3549 *
3550 * Transfer data from/to the device data register.
3551 *
3552 * LOCKING:
3553 * Inherited from caller.
3554 */
3555
3556 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3557 unsigned int buflen, int do_write)
3558 {
3559 /* Make the crap hardware pay the costs not the good stuff */
3560 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3561 unsigned long flags;
3562 local_irq_save(flags);
3563 if (ap->flags & ATA_FLAG_MMIO)
3564 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3565 else
3566 ata_pio_data_xfer(ap, buf, buflen, do_write);
3567 local_irq_restore(flags);
3568 } else {
3569 if (ap->flags & ATA_FLAG_MMIO)
3570 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3571 else
3572 ata_pio_data_xfer(ap, buf, buflen, do_write);
3573 }
3574 }
3575
3576 /**
3577 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3578 * @qc: Command on going
3579 *
3580 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3581 *
3582 * LOCKING:
3583 * Inherited from caller.
3584 */
3585
3586 static void ata_pio_sector(struct ata_queued_cmd *qc)
3587 {
3588 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3589 struct scatterlist *sg = qc->__sg;
3590 struct ata_port *ap = qc->ap;
3591 struct page *page;
3592 unsigned int offset;
3593 unsigned char *buf;
3594
3595 if (qc->cursect == (qc->nsect - 1))
3596 ap->hsm_task_state = HSM_ST_LAST;
3597
3598 page = sg[qc->cursg].page;
3599 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3600
3601 /* get the current page and offset */
3602 page = nth_page(page, (offset >> PAGE_SHIFT));
3603 offset %= PAGE_SIZE;
3604
3605 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3606
3607 if (PageHighMem(page)) {
3608 unsigned long flags;
3609
3610 local_irq_save(flags);
3611 buf = kmap_atomic(page, KM_IRQ0);
3612
3613 /* do the actual data transfer */
3614 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3615
3616 kunmap_atomic(buf, KM_IRQ0);
3617 local_irq_restore(flags);
3618 } else {
3619 buf = page_address(page);
3620 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3621 }
3622
3623 qc->cursect++;
3624 qc->cursg_ofs++;
3625
3626 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3627 qc->cursg++;
3628 qc->cursg_ofs = 0;
3629 }
3630 }
3631
3632 /**
3633 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3634 * @qc: Command on going
3635 *
3636 * Transfer one or many ATA_SECT_SIZE of data from/to the
3637 * ATA device for the DRQ request.
3638 *
3639 * LOCKING:
3640 * Inherited from caller.
3641 */
3642
3643 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3644 {
3645 if (is_multi_taskfile(&qc->tf)) {
3646 /* READ/WRITE MULTIPLE */
3647 unsigned int nsect;
3648
3649 WARN_ON(qc->dev->multi_count == 0);
3650
3651 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3652 while (nsect--)
3653 ata_pio_sector(qc);
3654 } else
3655 ata_pio_sector(qc);
3656 }
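
/* Illustrative sketch (hypothetical helper): the size of one DRQ
 * block, mirroring the logic above -- up to multi_count sectors for
 * READ/WRITE MULTIPLE, a single sector otherwise.
 */
static unsigned int example_drq_block_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf))
		return min(qc->nsect - qc->cursect, qc->dev->multi_count);
	return 1;
}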
3657
3658 /**
3659 * atapi_send_cdb - Write CDB bytes to hardware
3660 * @ap: Port to which ATAPI device is attached.
3661 * @qc: Taskfile currently active
3662 *
3663 * When device has indicated its readiness to accept
3664 * a CDB, this function is called. Send the CDB.
3665 *
3666 * LOCKING:
3667 * caller.
3668 */
3669
3670 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3671 {
3672 /* send SCSI cdb */
3673 DPRINTK("send cdb\n");
3674 WARN_ON(qc->dev->cdb_len < 12);
3675
3676 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3677 ata_altstatus(ap); /* flush */
3678
3679 switch (qc->tf.protocol) {
3680 case ATA_PROT_ATAPI:
3681 ap->hsm_task_state = HSM_ST;
3682 break;
3683 case ATA_PROT_ATAPI_NODATA:
3684 ap->hsm_task_state = HSM_ST_LAST;
3685 break;
3686 case ATA_PROT_ATAPI_DMA:
3687 ap->hsm_task_state = HSM_ST_LAST;
3688 /* initiate bmdma */
3689 ap->ops->bmdma_start(qc);
3690 break;
3691 }
3692 }
3693
3694 /**
3695 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3696 * @qc: Command on going
3697 * @bytes: number of bytes
3698 *
3699  *	Transfer data from/to the ATAPI device.
3700 *
3701 * LOCKING:
3702 * Inherited from caller.
3703 *
3704 */
3705
3706 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3707 {
3708 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3709 struct scatterlist *sg = qc->__sg;
3710 struct ata_port *ap = qc->ap;
3711 struct page *page;
3712 unsigned char *buf;
3713 unsigned int offset, count;
3714
3715 if (qc->curbytes + bytes >= qc->nbytes)
3716 ap->hsm_task_state = HSM_ST_LAST;
3717
3718 next_sg:
3719 if (unlikely(qc->cursg >= qc->n_elem)) {
3720 /*
3721 * The end of qc->sg is reached and the device expects
3722 * more data to transfer. In order not to overrun qc->sg
3723 * and fulfill length specified in the byte count register,
3724 * - for read case, discard trailing data from the device
3725 		 * - for write case, pad zero data to the device
3726 */
3727 u16 pad_buf[1] = { 0 };
3728 unsigned int words = bytes >> 1;
3729 unsigned int i;
3730
3731 if (words) /* warning if bytes > 1 */
3732 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3733 ap->id, bytes);
3734
3735 for (i = 0; i < words; i++)
3736 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3737
3738 ap->hsm_task_state = HSM_ST_LAST;
3739 return;
3740 }
3741
3742 sg = &qc->__sg[qc->cursg];
3743
3744 page = sg->page;
3745 offset = sg->offset + qc->cursg_ofs;
3746
3747 /* get the current page and offset */
3748 page = nth_page(page, (offset >> PAGE_SHIFT));
3749 offset %= PAGE_SIZE;
3750
3751 /* don't overrun current sg */
3752 count = min(sg->length - qc->cursg_ofs, bytes);
3753
3754 /* don't cross page boundaries */
3755 count = min(count, (unsigned int)PAGE_SIZE - offset);
3756
3757 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3758
3759 if (PageHighMem(page)) {
3760 unsigned long flags;
3761
3762 local_irq_save(flags);
3763 buf = kmap_atomic(page, KM_IRQ0);
3764
3765 /* do the actual data transfer */
3766 ata_data_xfer(ap, buf + offset, count, do_write);
3767
3768 kunmap_atomic(buf, KM_IRQ0);
3769 local_irq_restore(flags);
3770 } else {
3771 buf = page_address(page);
3772 ata_data_xfer(ap, buf + offset, count, do_write);
3773 }
3774
3775 bytes -= count;
3776 qc->curbytes += count;
3777 qc->cursg_ofs += count;
3778
3779 if (qc->cursg_ofs == sg->length) {
3780 qc->cursg++;
3781 qc->cursg_ofs = 0;
3782 }
3783
3784 if (bytes)
3785 goto next_sg;
3786 }
3787
3788 /**
3789 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3790 * @qc: Command on going
3791 *
3792  *	Transfer data from/to the ATAPI device.
3793 *
3794 * LOCKING:
3795 * Inherited from caller.
3796 */
3797
3798 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3799 {
3800 struct ata_port *ap = qc->ap;
3801 struct ata_device *dev = qc->dev;
3802 unsigned int ireason, bc_lo, bc_hi, bytes;
3803 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3804
3805 ap->ops->tf_read(ap, &qc->tf);
3806 ireason = qc->tf.nsect;
3807 bc_lo = qc->tf.lbam;
3808 bc_hi = qc->tf.lbah;
3809 bytes = (bc_hi << 8) | bc_lo;
3810
3811 /* shall be cleared to zero, indicating xfer of data */
3812 if (ireason & (1 << 0))
3813 goto err_out;
3814
3815 /* make sure transfer direction matches expected */
3816 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3817 if (do_write != i_write)
3818 goto err_out;
3819
3820 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3821
3822 __atapi_pio_bytes(qc, bytes);
3823
3824 return;
3825
3826 err_out:
3827 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3828 ap->id, dev->devno);
3829 qc->err_mask |= AC_ERR_HSM;
3830 ap->hsm_task_state = HSM_ST_ERR;
3831 }
3832
3833 /**
3834 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3835 * @ap: the target ata_port
3836 * @qc: qc on going
3837 *
3838 * RETURNS:
3839 * 1 if ok in workqueue, 0 otherwise.
3840 */
3841
3842 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3843 {
3844 if (qc->tf.flags & ATA_TFLAG_POLLING)
3845 return 1;
3846
3847 if (ap->hsm_task_state == HSM_ST_FIRST) {
3848 if (qc->tf.protocol == ATA_PROT_PIO &&
3849 (qc->tf.flags & ATA_TFLAG_WRITE))
3850 return 1;
3851
3852 if (is_atapi_taskfile(&qc->tf) &&
3853 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3854 return 1;
3855 }
3856
3857 return 0;
3858 }
3859
3860 /**
3861 * ata_hsm_move - move the HSM to the next state.
3862 * @ap: the target ata_port
3863 * @qc: qc on going
3864 * @status: current device status
3865 * @in_wq: 1 if called from workqueue, 0 otherwise
3866 *
3867 * RETURNS:
3868 * 1 when poll next status needed, 0 otherwise.
3869 */
3870
3871 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3872 u8 status, int in_wq)
3873 {
3874 unsigned long flags = 0;
3875 int poll_next;
3876
3877 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3878
3879 /* Make sure ata_qc_issue_prot() does not throw things
3880 * like DMA polling into the workqueue. Notice that
3881 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3882 */
3883 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3884
3885 fsm_start:
3886 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3887 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3888
3889 switch (ap->hsm_task_state) {
3890 case HSM_ST_FIRST:
3891 /* Send first data block or PACKET CDB */
3892
3893 /* If polling, we will stay in the work queue after
3894 * sending the data. Otherwise, interrupt handler
3895 * takes over after sending the data.
3896 */
3897 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3898
3899 /* check device status */
3900 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
3901 /* Wrong status. Let EH handle this */
3902 qc->err_mask |= AC_ERR_HSM;
3903 ap->hsm_task_state = HSM_ST_ERR;
3904 goto fsm_start;
3905 }
3906
3907 /* Device should not ask for data transfer (DRQ=1)
3908 * when it finds something wrong.
3909 * We ignore DRQ here and stop the HSM by
3910 * changing hsm_task_state to HSM_ST_ERR and
3911 * let the EH abort the command or reset the device.
3912 */
3913 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3914 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3915 ap->id, status);
3916 qc->err_mask |= AC_ERR_DEV;
3917 ap->hsm_task_state = HSM_ST_ERR;
3918 goto fsm_start;
3919 }
3920
3921 /* Send the CDB (atapi) or the first data block (ata pio out).
3922 * During the state transition, interrupt handler shouldn't
3923 * be invoked before the data transfer is complete and
3924 * hsm_task_state is changed. Hence, the following locking.
3925 */
3926 if (in_wq)
3927 spin_lock_irqsave(&ap->host_set->lock, flags);
3928
3929 if (qc->tf.protocol == ATA_PROT_PIO) {
3930 /* PIO data out protocol.
3931 * send first data block.
3932 */
3933
3934 /* ata_pio_sectors() might change the state
3935 * to HSM_ST_LAST. so, the state is changed here
3936 * before ata_pio_sectors().
3937 */
3938 ap->hsm_task_state = HSM_ST;
3939 ata_pio_sectors(qc);
3940 ata_altstatus(ap); /* flush */
3941 } else
3942 /* send CDB */
3943 atapi_send_cdb(ap, qc);
3944
3945 if (in_wq)
3946 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3947
3948 /* if polling, ata_pio_task() handles the rest.
3949 * otherwise, interrupt handler takes over from here.
3950 */
3951 break;
3952
3953 case HSM_ST:
3954 /* complete command or read/write the data register */
3955 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3956 /* ATAPI PIO protocol */
3957 if ((status & ATA_DRQ) == 0) {
3958 /* no more data to transfer */
3959 ap->hsm_task_state = HSM_ST_LAST;
3960 goto fsm_start;
3961 }
3962
3963 /* Device should not ask for data transfer (DRQ=1)
3964 * when it finds something wrong.
3965 * We ignore DRQ here and stop the HSM by
3966 * changing hsm_task_state to HSM_ST_ERR and
3967 * let the EH abort the command or reset the device.
3968 */
3969 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3970 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3971 ap->id, status);
3972 qc->err_mask |= AC_ERR_DEV;
3973 ap->hsm_task_state = HSM_ST_ERR;
3974 goto fsm_start;
3975 }
3976
3977 atapi_pio_bytes(qc);
3978
3979 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3980 /* bad ireason reported by device */
3981 goto fsm_start;
3982
3983 } else {
3984 /* ATA PIO protocol */
3985 if (unlikely((status & ATA_DRQ) == 0)) {
3986 /* handle BSY=0, DRQ=0 as error */
3987 qc->err_mask |= AC_ERR_HSM;
3988 ap->hsm_task_state = HSM_ST_ERR;
3989 goto fsm_start;
3990 }
3991
3992 /* For PIO reads, some devices may ask for
3993 			 * data transfer (DRQ=1) along with ERR=1.
3994 * We respect DRQ here and transfer one
3995 * block of junk data before changing the
3996 * hsm_task_state to HSM_ST_ERR.
3997 *
3998 * For PIO writes, ERR=1 DRQ=1 doesn't make
3999 * sense since the data block has been
4000 * transferred to the device.
4001 */
4002 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4003 				/* data might be corrupted */
4004 qc->err_mask |= AC_ERR_DEV;
4005
4006 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4007 ata_pio_sectors(qc);
4008 ata_altstatus(ap);
4009 status = ata_wait_idle(ap);
4010 }
4011
4012 /* ata_pio_sectors() might change the
4013 * state to HSM_ST_LAST. so, the state
4014 * is changed after ata_pio_sectors().
4015 */
4016 ap->hsm_task_state = HSM_ST_ERR;
4017 goto fsm_start;
4018 }
4019
4020 ata_pio_sectors(qc);
4021
4022 if (ap->hsm_task_state == HSM_ST_LAST &&
4023 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4024 /* all data read */
4025 ata_altstatus(ap);
4026 status = ata_wait_idle(ap);
4027 goto fsm_start;
4028 }
4029 }
4030
4031 ata_altstatus(ap); /* flush */
4032 poll_next = 1;
4033 break;
4034
4035 case HSM_ST_LAST:
4036 if (unlikely(!ata_ok(status))) {
4037 qc->err_mask |= __ac_err_mask(status);
4038 ap->hsm_task_state = HSM_ST_ERR;
4039 goto fsm_start;
4040 }
4041
4042 /* no more data to transfer */
4043 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
4044 ap->id, status);
4045
4046 WARN_ON(qc->err_mask);
4047
4048 ap->hsm_task_state = HSM_ST_IDLE;
4049
4050 /* complete taskfile transaction */
4051 if (in_wq)
4052 ata_poll_qc_complete(qc);
4053 else
4054 ata_qc_complete(qc);
4055
4056 poll_next = 0;
4057 break;
4058
4059 case HSM_ST_ERR:
4060 if (qc->tf.command != ATA_CMD_PACKET)
4061 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x\n",
4062 ap->id, status);
4063
4064 /* make sure qc->err_mask is available to
4065 * know what's wrong and recover
4066 */
4067 WARN_ON(qc->err_mask == 0);
4068
4069 ap->hsm_task_state = HSM_ST_IDLE;
4070
4071 /* complete taskfile transaction */
4072 if (in_wq)
4073 ata_poll_qc_complete(qc);
4074 else
4075 ata_qc_complete(qc);
4076
4077 poll_next = 0;
4078 break;
4079 default:
4080 poll_next = 0;
4081 BUG();
4082 }
4083
4084 return poll_next;
4085 }
4086
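/* Usage sketch (illustrative only, not part of the driver): ata_hsm_move()
 * consumes a freshly read device status and returns nonzero while the
 * caller should keep polling.  A polling context (in_wq == 1) therefore
 * boils down to something like:
 *
 *	do {
 *		status = ata_chk_status(ap);
 *	} while (ata_hsm_move(ap, qc, status, 1));
 *
 * The interrupt path instead calls it once per IRQ with in_wq == 0,
 * as ata_host_intr() does further below.
 */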
4087 static void ata_pio_task(void *_data)
4088 {
4089 struct ata_port *ap = _data;
4090 struct ata_queued_cmd *qc;
4091 u8 status;
4092 int poll_next;
4093
4094 fsm_start:
4095 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4096
4097 qc = ata_qc_from_tag(ap, ap->active_tag);
4098 WARN_ON(qc == NULL);
4099
4100 /*
4101 * This is purely heuristic. This is a fast path.
4102 * Sometimes when we enter, BSY will be cleared in
4103 * a chk-status or two. If not, the drive is probably seeking
4104 * or something. Snooze for a couple msecs, then
4105 * chk-status again. If still busy, queue delayed work.
4106 */
4107 status = ata_busy_wait(ap, ATA_BUSY, 5);
4108 if (status & ATA_BUSY) {
4109 msleep(2);
4110 status = ata_busy_wait(ap, ATA_BUSY, 10);
4111 if (status & ATA_BUSY) {
4112 ata_port_queue_task(ap, ata_pio_task, ap, ATA_SHORT_PAUSE);
4113 return;
4114 }
4115 }
4116
4117 /* move the HSM */
4118 poll_next = ata_hsm_move(ap, qc, status, 1);
4119
4120 /* another command or interrupt handler
4121 * may be running at this point.
4122 */
4123 if (poll_next)
4124 goto fsm_start;
4125 }
4126
4127 /**
4128 * ata_qc_new - Request an available ATA command, for queueing
4129 * @ap: Port on which the new command will be queued
4131 *
4132 * LOCKING:
4133 * None.
4134 */
4135
4136 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4137 {
4138 struct ata_queued_cmd *qc = NULL;
4139 unsigned int i;
4140
4141 for (i = 0; i < ATA_MAX_QUEUE; i++)
4142 if (!test_and_set_bit(i, &ap->qactive)) {
4143 qc = ata_qc_from_tag(ap, i);
4144 break;
4145 }
4146
4147 if (qc)
4148 qc->tag = i;
4149
4150 return qc;
4151 }
4152
4153 /**
4154 * ata_qc_new_init - Request an available ATA command, and initialize it
4155 * @ap: Port associated with device @dev
4156 * @dev: Device from which we request an available command structure
4157 *
4158 * LOCKING:
4159 * None.
4160 */
4161
4162 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
4163 struct ata_device *dev)
4164 {
4165 struct ata_queued_cmd *qc;
4166
4167 qc = ata_qc_new(ap);
4168 if (qc) {
4169 qc->scsicmd = NULL;
4170 qc->ap = ap;
4171 qc->dev = dev;
4172
4173 ata_qc_reinit(qc);
4174 }
4175
4176 return qc;
4177 }
4178
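/* Illustrative sketch (hypothetical caller): allocation fails with NULL
 * once all ATA_MAX_QUEUE tags are in flight, so the result must be
 * checked before use:
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(ap, dev);
 *
 *	if (qc == NULL)
 *		return -EBUSY;		(no free tag, retry later)
 *	qc->tf.command = ATA_CMD_ID_ATA;
 */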
4179 /**
4180 * ata_qc_free - free unused ata_queued_cmd
4181 * @qc: Command to complete
4182 *
4183 * Designed to free unused ata_queued_cmd object
4184 * in case something prevents using it.
4185 *
4186 * LOCKING:
4187 * spin_lock_irqsave(host_set lock)
4188 */
4189 void ata_qc_free(struct ata_queued_cmd *qc)
4190 {
4191 struct ata_port *ap = qc->ap;
4192 unsigned int tag;
4193
4194 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4195
4196 qc->flags = 0;
4197 tag = qc->tag;
4198 if (likely(ata_tag_valid(tag))) {
4199 if (tag == ap->active_tag)
4200 ap->active_tag = ATA_TAG_POISON;
4201 qc->tag = ATA_TAG_POISON;
4202 clear_bit(tag, &ap->qactive);
4203 }
4204 }
4205
4206 void __ata_qc_complete(struct ata_queued_cmd *qc)
4207 {
4208 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4209 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4210
4211 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4212 ata_sg_clean(qc);
4213
4214 /* atapi: mark qc as inactive to prevent the interrupt handler
4215 * from completing the command twice later, before the error handler
4216 * is called. (when rc != 0 and atapi request sense is needed)
4217 */
4218 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4219
4220 /* call completion callback */
4221 qc->complete_fn(qc);
4222 }
4223
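/* Sketch (assumed pattern, mirroring ata_exec_internal()): a private
 * completion callback can be installed via qc->complete_fn before the
 * command is issued, e.g. to wake up a sleeping submitter:
 *
 *	static void foo_internal_done(struct ata_queued_cmd *qc)
 *	{
 *		struct completion *waiting = qc->private_data;
 *
 *		complete(waiting);
 *	}
 */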
4224 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4225 {
4226 struct ata_port *ap = qc->ap;
4227
4228 switch (qc->tf.protocol) {
4229 case ATA_PROT_DMA:
4230 case ATA_PROT_ATAPI_DMA:
4231 return 1;
4232
4233 case ATA_PROT_ATAPI:
4234 case ATA_PROT_PIO:
4235 if (ap->flags & ATA_FLAG_PIO_DMA)
4236 return 1;
4237
4238 /* fall through */
4239
4240 default:
4241 return 0;
4242 }
4243
4244 /* never reached */
4245 }
4246
4247 /**
4248 * ata_qc_issue - issue taskfile to device
4249 * @qc: command to issue to device
4250 *
4251 * Prepare an ATA command for submission to the device.
4252 * This includes mapping the data into a DMA-able
4253 * area, filling in the S/G table, and finally
4254 * writing the taskfile to hardware, starting the command.
4255 *
4256 * LOCKING:
4257 * spin_lock_irqsave(host_set lock)
4258 */
4259 void ata_qc_issue(struct ata_queued_cmd *qc)
4260 {
4261 struct ata_port *ap = qc->ap;
4262
4263 qc->ap->active_tag = qc->tag;
4264 qc->flags |= ATA_QCFLAG_ACTIVE;
4265
4266 if (ata_should_dma_map(qc)) {
4267 if (qc->flags & ATA_QCFLAG_SG) {
4268 if (ata_sg_setup(qc))
4269 goto sg_err;
4270 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4271 if (ata_sg_setup_one(qc))
4272 goto sg_err;
4273 }
4274 } else {
4275 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4276 }
4277
4278 ap->ops->qc_prep(qc);
4279
4280 qc->err_mask |= ap->ops->qc_issue(qc);
4281 if (unlikely(qc->err_mask))
4282 goto err;
4283 return;
4284
4285 sg_err:
4286 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4287 qc->err_mask |= AC_ERR_SYSTEM;
4288 err:
4289 ata_qc_complete(qc);
4290 }
4291
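/* Putting it together (illustrative sketch of a hypothetical submitter,
 * called with the host_set lock held; foo_done is an assumed callback):
 *
 *	qc = ata_qc_new_init(ap, dev);
 *	if (!qc)
 *		return;
 *	ata_sg_init_one(qc, buf, buflen);	(single-buffer DMA map)
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *	(fill qc->tf and set qc->tf.protocol here)
 *	qc->complete_fn = foo_done;
 *	ata_qc_issue(qc);
 */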
4292 /**
4293 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4294 * @qc: command to issue to device
4295 *
4296 * Using various libata functions and hooks, this function
4297 * starts an ATA command. ATA commands are grouped into
4298 * classes called "protocols", and issuing each type of protocol
4299 * is slightly different.
4300 *
4301 * May be used as the qc_issue() entry in ata_port_operations.
4302 *
4303 * LOCKING:
4304 * spin_lock_irqsave(host_set lock)
4305 *
4306 * RETURNS:
4307 * Zero on success, AC_ERR_* mask on failure
4308 */
4309
4310 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4311 {
4312 struct ata_port *ap = qc->ap;
4313
4314 /* Use polling PIO if the LLD doesn't handle
4315 * interrupt-driven PIO and the ATAPI CDB interrupt.
4316 */
4317 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4318 switch (qc->tf.protocol) {
4319 case ATA_PROT_PIO:
4320 case ATA_PROT_ATAPI:
4321 case ATA_PROT_ATAPI_NODATA:
4322 qc->tf.flags |= ATA_TFLAG_POLLING;
4323 break;
4324 case ATA_PROT_ATAPI_DMA:
4325 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4326 /* see ata_check_atapi_dma() */
4327 BUG();
4328 break;
4329 default:
4330 break;
4331 }
4332 }
4333
4334 /* select the device */
4335 ata_dev_select(ap, qc->dev->devno, 1, 0);
4336
4337 /* start the command */
4338 switch (qc->tf.protocol) {
4339 case ATA_PROT_NODATA:
4340 if (qc->tf.flags & ATA_TFLAG_POLLING)
4341 ata_qc_set_polling(qc);
4342
4343 ata_tf_to_host(ap, &qc->tf);
4344 ap->hsm_task_state = HSM_ST_LAST;
4345
4346 if (qc->tf.flags & ATA_TFLAG_POLLING)
4347 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4348
4349 break;
4350
4351 case ATA_PROT_DMA:
4352 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4353
4354 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4355 ap->ops->bmdma_setup(qc); /* set up bmdma */
4356 ap->ops->bmdma_start(qc); /* initiate bmdma */
4357 ap->hsm_task_state = HSM_ST_LAST;
4358 break;
4359
4360 case ATA_PROT_PIO:
4361 if (qc->tf.flags & ATA_TFLAG_POLLING)
4362 ata_qc_set_polling(qc);
4363
4364 ata_tf_to_host(ap, &qc->tf);
4365
4366 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4367 /* PIO data out protocol */
4368 ap->hsm_task_state = HSM_ST_FIRST;
4369 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4370
4371 /* always send first data block using
4372 * the ata_pio_task() codepath.
4373 */
4374 } else {
4375 /* PIO data in protocol */
4376 ap->hsm_task_state = HSM_ST;
4377
4378 if (qc->tf.flags & ATA_TFLAG_POLLING)
4379 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4380
4381 /* if polling, ata_pio_task() handles the rest.
4382 * otherwise, interrupt handler takes over from here.
4383 */
4384 }
4385
4386 break;
4387
4388 case ATA_PROT_ATAPI:
4389 case ATA_PROT_ATAPI_NODATA:
4390 if (qc->tf.flags & ATA_TFLAG_POLLING)
4391 ata_qc_set_polling(qc);
4392
4393 ata_tf_to_host(ap, &qc->tf);
4394
4395 ap->hsm_task_state = HSM_ST_FIRST;
4396
4397 /* send cdb by polling if no cdb interrupt */
4398 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4399 (qc->tf.flags & ATA_TFLAG_POLLING))
4400 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4401 break;
4402
4403 case ATA_PROT_ATAPI_DMA:
4404 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4405
4406 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4407 ap->ops->bmdma_setup(qc); /* set up bmdma */
4408 ap->hsm_task_state = HSM_ST_FIRST;
4409
4410 /* send cdb by polling if no cdb interrupt */
4411 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4412 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4413 break;
4414
4415 default:
4416 WARN_ON(1);
4417 return AC_ERR_SYSTEM;
4418 }
4419
4420 return 0;
4421 }
4422
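/* Sketch: a typical LLD needs no protocol logic of its own and simply
 * plugs the libata helpers in (the "foo" names are hypothetical):
 *
 *	static const struct ata_port_operations foo_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		.dev_select	= ata_std_dev_select,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		...
 *	};
 */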
4423 /**
4424 * ata_host_intr - Handle host interrupt for given (port, task)
4425 * @ap: Port on which interrupt arrived (possibly...)
4426 * @qc: Taskfile currently active in engine
4427 *
4428 * Handle host interrupt for given queued command. DMA and
4429 * interrupt-driven PIO are handled here; polled commands are
4430 * handled by ata_pio_task() with interrupts disabled (nIEN bit).
4431 *
4432 * LOCKING:
4433 * spin_lock_irqsave(host_set lock)
4434 *
4435 * RETURNS:
4436 * One if interrupt was handled, zero if not (shared irq).
4437 */
4438
4439 inline unsigned int ata_host_intr (struct ata_port *ap,
4440 struct ata_queued_cmd *qc)
4441 {
4442 u8 status, host_stat = 0;
4443
4444 VPRINTK("ata%u: protocol %d task_state %d\n",
4445 ap->id, qc->tf.protocol, ap->hsm_task_state);
4446
4447 /* Check whether we are expecting interrupt in this state */
4448 switch (ap->hsm_task_state) {
4449 case HSM_ST_FIRST:
4450 /* Some pre-ATAPI-4 devices assert INTRQ
4451 * in this state when ready to receive the CDB.
4452 */
4453
4454 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4455 * The flag is set only for ATAPI devices, so there is
4456 * no need to check is_atapi_taskfile(&qc->tf) again.
4457 */
4458 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4459 goto idle_irq;
4460 break;
4461 case HSM_ST_LAST:
4462 if (qc->tf.protocol == ATA_PROT_DMA ||
4463 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4464 /* check status of DMA engine */
4465 host_stat = ap->ops->bmdma_status(ap);
4466 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4467
4468 /* if it's not our irq... */
4469 if (!(host_stat & ATA_DMA_INTR))
4470 goto idle_irq;
4471
4472 /* before we do anything else, clear DMA-Start bit */
4473 ap->ops->bmdma_stop(qc);
4474
4475 if (unlikely(host_stat & ATA_DMA_ERR)) {
4476 /* error when transferring data to/from memory */
4477 qc->err_mask |= AC_ERR_HOST_BUS;
4478 ap->hsm_task_state = HSM_ST_ERR;
4479 }
4480 }
4481 break;
4482 case HSM_ST:
4483 break;
4484 default:
4485 goto idle_irq;
4486 }
4487
4488 /* check altstatus */
4489 status = ata_altstatus(ap);
4490 if (status & ATA_BUSY)
4491 goto idle_irq;
4492
4493 /* check main status, clearing INTRQ */
4494 status = ata_chk_status(ap);
4495 if (unlikely(status & ATA_BUSY))
4496 goto idle_irq;
4497
4498 /* ack bmdma irq events */
4499 ap->ops->irq_clear(ap);
4500
4501 ata_hsm_move(ap, qc, status, 0);
4502 return 1; /* irq handled */
4503
4504 idle_irq:
4505 ap->stats.idle_irq++;
4506
4507 #ifdef ATA_IRQ_TRAP
4508 if ((ap->stats.idle_irq % 1000) == 0) {
4509 ata_irq_ack(ap, 0); /* debug trap */
4510 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4511 return 1;
4512 }
4513 #endif
4514 return 0; /* irq not handled */
4515 }
4516
4517 /**
4518 * ata_interrupt - Default ATA host interrupt handler
4519 * @irq: irq line (unused)
4520 * @dev_instance: pointer to our ata_host_set information structure
4521 * @regs: unused
4522 *
4523 * Default interrupt handler for PCI IDE devices. Calls
4524 * ata_host_intr() for each port that is not disabled.
4525 *
4526 * LOCKING:
4527 * Obtains host_set lock during operation.
4528 *
4529 * RETURNS:
4530 * IRQ_NONE or IRQ_HANDLED.
4531 */
4532
4533 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4534 {
4535 struct ata_host_set *host_set = dev_instance;
4536 unsigned int i;
4537 unsigned int handled = 0;
4538 unsigned long flags;
4539
4540 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4541 spin_lock_irqsave(&host_set->lock, flags);
4542
4543 for (i = 0; i < host_set->n_ports; i++) {
4544 struct ata_port *ap;
4545
4546 ap = host_set->ports[i];
4547 if (ap &&
4548 !(ap->flags & ATA_FLAG_DISABLED)) {
4549 struct ata_queued_cmd *qc;
4550
4551 qc = ata_qc_from_tag(ap, ap->active_tag);
4552 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4553 (qc->flags & ATA_QCFLAG_ACTIVE))
4554 handled |= ata_host_intr(ap, qc);
4555 }
4556 }
4557
4558 spin_unlock_irqrestore(&host_set->lock, flags);
4559
4560 return IRQ_RETVAL(handled);
4561 }
4562
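/* Sketch: ata_device_add() below requests the irq through
 * ent->port_ops->irq_handler, so an LLD that wants this default
 * handler normally just sets
 *
 *	.irq_handler	= ata_interrupt,
 *	.irq_clear	= ata_bmdma_irq_clear,
 *
 * in its ata_port_operations.
 */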
4563
4564 /*
4565 * Execute a 'simple' command that consists only of the opcode 'cmd'
4566 * itself, without filling any other registers
4567 */
4568 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4569 u8 cmd)
4570 {
4571 struct ata_taskfile tf;
4572 int err;
4573
4574 ata_tf_init(ap, &tf, dev->devno);
4575
4576 tf.command = cmd;
4577 tf.flags |= ATA_TFLAG_DEVICE;
4578 tf.protocol = ATA_PROT_NODATA;
4579
4580 err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
4581 if (err)
4582 printk(KERN_ERR "%s: ata command failed: %d\n",
4583 __FUNCTION__, err);
4584
4585 return err;
4586 }
4587
4588 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4589 {
4590 u8 cmd;
4591
4592 if (!ata_try_flush_cache(dev))
4593 return 0;
4594
4595 if (ata_id_has_flush_ext(dev->id))
4596 cmd = ATA_CMD_FLUSH_EXT;
4597 else
4598 cmd = ATA_CMD_FLUSH;
4599
4600 return ata_do_simple_cmd(ap, dev, cmd);
4601 }
4602
4603 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4604 {
4605 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4606 }
4607
4608 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4609 {
4610 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4611 }
4612
4613 /**
4614 * ata_device_resume - wake up a previously suspended device
4615 * @ap: port the device is connected to
4616 * @dev: the device to resume
4617 *
4618 * Kick the drive back into action by sending it an idle immediate
4619 * command and making sure its transfer mode matches between drive
4620 * and host.
4621 *
4622 */
4623 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4624 {
4625 if (ap->flags & ATA_FLAG_SUSPENDED) {
4626 struct ata_device *failed_dev;
4627 ap->flags &= ~ATA_FLAG_SUSPENDED;
4628 while (ata_set_mode(ap, &failed_dev))
4629 ata_dev_disable(ap, failed_dev);
4630 }
4631 if (!ata_dev_enabled(dev))
4632 return 0;
4633 if (dev->class == ATA_DEV_ATA)
4634 ata_start_drive(ap, dev);
4635
4636 return 0;
4637 }
4638
4639 /**
4640 * ata_device_suspend - prepare a device for suspend
4641 * @ap: port the device is connected to
4642 * @dev: the device to suspend
4643 *
4644 * Flush the cache on the drive, if appropriate, then issue a
4645 * standbynow command.
4646 */
4647 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4648 {
4649 if (!ata_dev_enabled(dev))
4650 return 0;
4651 if (dev->class == ATA_DEV_ATA)
4652 ata_flush_cache(ap, dev);
4653
4654 if (state.event != PM_EVENT_FREEZE)
4655 ata_standby_drive(ap, dev);
4656 ap->flags |= ATA_FLAG_SUSPENDED;
4657 return 0;
4658 }
4659
4660 /**
4661 * ata_port_start - Set port up for dma.
4662 * @ap: Port to initialize
4663 *
4664 * Called just after data structures for each port are
4665 * initialized. Allocates space for PRD table.
4666 *
4667 * May be used as the port_start() entry in ata_port_operations.
4668 *
4669 * LOCKING:
4670 * Inherited from caller.
4671 */
4672
4673 int ata_port_start (struct ata_port *ap)
4674 {
4675 struct device *dev = ap->dev;
4676 int rc;
4677
4678 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4679 if (!ap->prd)
4680 return -ENOMEM;
4681
4682 rc = ata_pad_alloc(ap, dev);
4683 if (rc) {
4684 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4685 return rc;
4686 }
4687
4688 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4689
4690 return 0;
4691 }
4692
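/* Sketch (hypothetical LLD): drivers needing per-port private data
 * usually layer on top of this helper (unwinding of the PRD allocation
 * on the error path omitted for brevity):
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		struct foo_port_priv *pp;
 *		int rc = ata_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		pp = kzalloc(sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */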
4693
4694 /**
4695 * ata_port_stop - Undo ata_port_start()
4696 * @ap: Port to shut down
4697 *
4698 * Frees the PRD table.
4699 *
4700 * May be used as the port_stop() entry in ata_port_operations.
4701 *
4702 * LOCKING:
4703 * Inherited from caller.
4704 */
4705
4706 void ata_port_stop (struct ata_port *ap)
4707 {
4708 struct device *dev = ap->dev;
4709
4710 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4711 ata_pad_free(ap, dev);
4712 }
4713
4714 void ata_host_stop (struct ata_host_set *host_set)
4715 {
4716 if (host_set->mmio_base)
4717 iounmap(host_set->mmio_base);
4718 }
4719
4720
4721 /**
4722 * ata_host_remove - Unregister SCSI host structure with upper layers
4723 * @ap: Port to unregister
4724 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4725 *
4726 * LOCKING:
4727 * Inherited from caller.
4728 */
4729
4730 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4731 {
4732 struct Scsi_Host *sh = ap->host;
4733
4734 DPRINTK("ENTER\n");
4735
4736 if (do_unregister)
4737 scsi_remove_host(sh);
4738
4739 ap->ops->port_stop(ap);
4740 }
4741
4742 /**
4743 * ata_host_init - Initialize an ata_port structure
4744 * @ap: Structure to initialize
4745 * @host: associated SCSI mid-layer structure
4746 * @host_set: Collection of hosts to which @ap belongs
4747 * @ent: Probe information provided by low-level driver
4748 * @port_no: Port number associated with this ata_port
4749 *
4750 * Initialize a new ata_port structure, and its associated
4751 * scsi_host.
4752 *
4753 * LOCKING:
4754 * Inherited from caller.
4755 */
4756
4757 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4758 struct ata_host_set *host_set,
4759 const struct ata_probe_ent *ent, unsigned int port_no)
4760 {
4761 unsigned int i;
4762
4763 host->max_id = 16;
4764 host->max_lun = 1;
4765 host->max_channel = 1;
4766 host->unique_id = ata_unique_id++;
4767 host->max_cmd_len = 12;
4768
4769 ap->flags = ATA_FLAG_DISABLED;
4770 ap->id = host->unique_id;
4771 ap->host = host;
4772 ap->ctl = ATA_DEVCTL_OBS;
4773 ap->host_set = host_set;
4774 ap->dev = ent->dev;
4775 ap->port_no = port_no;
4776 ap->hard_port_no =
4777 ent->legacy_mode ? ent->hard_port_no : port_no;
4778 ap->pio_mask = ent->pio_mask;
4779 ap->mwdma_mask = ent->mwdma_mask;
4780 ap->udma_mask = ent->udma_mask;
4781 ap->flags |= ent->host_flags;
4782 ap->ops = ent->port_ops;
4783 ap->cbl = ATA_CBL_NONE;
4784 ap->sata_spd_limit = UINT_MAX;
4785 ap->active_tag = ATA_TAG_POISON;
4786 ap->last_ctl = 0xFF;
4787
4788 INIT_WORK(&ap->port_task, NULL, NULL);
4789 INIT_LIST_HEAD(&ap->eh_done_q);
4790
4791 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4792 struct ata_device *dev = &ap->device[i];
4793 dev->devno = i;
4794 dev->pio_mask = UINT_MAX;
4795 dev->mwdma_mask = UINT_MAX;
4796 dev->udma_mask = UINT_MAX;
4797 }
4798
4799 #ifdef ATA_IRQ_TRAP
4800 ap->stats.unhandled_irq = 1;
4801 ap->stats.idle_irq = 1;
4802 #endif
4803
4804 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4805 }
4806
4807 /**
4808 * ata_host_add - Attach low-level ATA driver to system
4809 * @ent: Information provided by low-level driver
4810 * @host_set: Collection of ports to which we add
4811 * @port_no: Port number associated with this host
4812 *
4813 * Attach low-level ATA driver to system.
4814 *
4815 * LOCKING:
4816 * PCI/etc. bus probe sem.
4817 *
4818 * RETURNS:
4819 * New ata_port on success, NULL on error.
4820 */
4821
4822 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4823 struct ata_host_set *host_set,
4824 unsigned int port_no)
4825 {
4826 struct Scsi_Host *host;
4827 struct ata_port *ap;
4828 int rc;
4829
4830 DPRINTK("ENTER\n");
4831
4832 if (!ent->port_ops->probe_reset &&
4833 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4834 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4835 port_no);
4836 return NULL;
4837 }
4838
4839 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4840 if (!host)
4841 return NULL;
4842
4843 host->transportt = &ata_scsi_transport_template;
4844
4845 ap = (struct ata_port *) &host->hostdata[0];
4846
4847 ata_host_init(ap, host, host_set, ent, port_no);
4848
4849 rc = ap->ops->port_start(ap);
4850 if (rc)
4851 goto err_out;
4852
4853 return ap;
4854
4855 err_out:
4856 scsi_host_put(host);
4857 return NULL;
4858 }
4859
4860 /**
4861 * ata_device_add - Register hardware device with ATA and SCSI layers
4862 * @ent: Probe information describing hardware device to be registered
4863 *
4864 * This function processes the information provided in the probe
4865 * information struct @ent, allocates the necessary ATA and SCSI
4866 * host information structures, initializes them, and registers
4867 * everything with requisite kernel subsystems.
4868 *
4869 * This function requests irqs, probes the ATA bus, and probes
4870 * the SCSI bus.
4871 *
4872 * LOCKING:
4873 * PCI/etc. bus probe sem.
4874 *
4875 * RETURNS:
4876 * Number of ports registered. Zero on error (no ports registered).
4877 */
4878
4879 int ata_device_add(const struct ata_probe_ent *ent)
4880 {
4881 unsigned int count = 0, i;
4882 struct device *dev = ent->dev;
4883 struct ata_host_set *host_set;
4884
4885 DPRINTK("ENTER\n");
4886 /* alloc a container for our list of ATA ports (buses) */
4887 host_set = kzalloc(sizeof(struct ata_host_set) +
4888 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4889 if (!host_set)
4890 return 0;
4891 spin_lock_init(&host_set->lock);
4892
4893 host_set->dev = dev;
4894 host_set->n_ports = ent->n_ports;
4895 host_set->irq = ent->irq;
4896 host_set->mmio_base = ent->mmio_base;
4897 host_set->private_data = ent->private_data;
4898 host_set->ops = ent->port_ops;
4899 host_set->flags = ent->host_set_flags;
4900
4901 /* register each port bound to this device */
4902 for (i = 0; i < ent->n_ports; i++) {
4903 struct ata_port *ap;
4904 unsigned long xfer_mode_mask;
4905
4906 ap = ata_host_add(ent, host_set, i);
4907 if (!ap)
4908 goto err_out;
4909
4910 host_set->ports[i] = ap;
4911 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4912 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4913 (ap->pio_mask << ATA_SHIFT_PIO);
4914
4915 /* print per-port info to dmesg */
4916 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4917 "bmdma 0x%lX irq %lu\n",
4918 ap->id,
4919 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4920 ata_mode_string(xfer_mode_mask),
4921 ap->ioaddr.cmd_addr,
4922 ap->ioaddr.ctl_addr,
4923 ap->ioaddr.bmdma_addr,
4924 ent->irq);
4925
4926 ata_chk_status(ap);
4927 host_set->ops->irq_clear(ap);
4928 count++;
4929 }
4930
4931 if (!count)
4932 goto err_free_ret;
4933
4934 /* obtain the irq, which is shared between channels */
4935 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4936 DRV_NAME, host_set))
4937 goto err_out;
4938
4939 /* perform each probe synchronously */
4940 DPRINTK("probe begin\n");
4941 for (i = 0; i < count; i++) {
4942 struct ata_port *ap;
4943 int rc;
4944
4945 ap = host_set->ports[i];
4946
4947 DPRINTK("ata%u: bus probe begin\n", ap->id);
4948 rc = ata_bus_probe(ap);
4949 DPRINTK("ata%u: bus probe end\n", ap->id);
4950
4951 if (rc) {
4952 /* FIXME: do something useful here?
4953 * Current libata behavior will
4954 * tear down everything when
4955 * the module is removed
4956 * or the h/w is unplugged.
4957 */
4958 }
4959
4960 rc = scsi_add_host(ap->host, dev);
4961 if (rc) {
4962 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4963 ap->id);
4964 /* FIXME: do something useful here */
4965 /* FIXME: handle unconditional calls to
4966 * scsi_scan_host and ata_host_remove, below,
4967 * at the very least
4968 */
4969 }
4970 }
4971
4972 /* probes are done, now scan each port's disk(s) */
4973 DPRINTK("host probe begin\n");
4974 for (i = 0; i < count; i++) {
4975 struct ata_port *ap = host_set->ports[i];
4976
4977 ata_scsi_scan_host(ap);
4978 }
4979
4980 dev_set_drvdata(dev, host_set);
4981
4982 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4983 return ent->n_ports; /* success */
4984
4985 err_out:
4986 for (i = 0; i < count; i++) {
4987 ata_host_remove(host_set->ports[i], 1);
4988 scsi_host_put(host_set->ports[i]->host);
4989 }
4990 err_free_ret:
4991 kfree(host_set);
4992 VPRINTK("EXIT, returning 0\n");
4993 return 0;
4994 }
4995
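/* Sketch of a minimal registration path (hypothetical "foo" LLD using
 * legacy I/O ports; error handling omitted):
 *
 *	struct ata_probe_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
 *
 *	ent->dev	= &pdev->dev;
 *	ent->port_ops	= &foo_ops;
 *	ent->sht	= &foo_sht;
 *	ent->n_ports	= 1;
 *	ent->irq	= pdev->irq;
 *	ent->irq_flags	= SA_SHIRQ;
 *	ent->pio_mask	= 0x1f;			(PIO modes 0-4)
 *	ent->host_flags	= ATA_FLAG_SRST;	(a reset mechanism is required)
 *	(fill ent->port[0] addresses, see ata_std_ports() below)
 *	ata_device_add(ent);
 *	kfree(ent);
 */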
4996 /**
4997 * ata_host_set_remove - PCI layer callback for device removal
4998 * @host_set: ATA host set that was removed
4999 *
5000 * Unregister all objects associated with this host set. Free those
5001 * objects.
5002 *
5003 * LOCKING:
5004 * Inherited from calling layer (may sleep).
5005 */
5006
5007 void ata_host_set_remove(struct ata_host_set *host_set)
5008 {
5009 struct ata_port *ap;
5010 unsigned int i;
5011
5012 for (i = 0; i < host_set->n_ports; i++) {
5013 ap = host_set->ports[i];
5014 scsi_remove_host(ap->host);
5015 }
5016
5017 free_irq(host_set->irq, host_set);
5018
5019 for (i = 0; i < host_set->n_ports; i++) {
5020 ap = host_set->ports[i];
5021
5022 ata_scsi_release(ap->host);
5023
5024 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5025 struct ata_ioports *ioaddr = &ap->ioaddr;
5026
5027 if (ioaddr->cmd_addr == 0x1f0)
5028 release_region(0x1f0, 8);
5029 else if (ioaddr->cmd_addr == 0x170)
5030 release_region(0x170, 8);
5031 }
5032
5033 scsi_host_put(ap->host);
5034 }
5035
5036 if (host_set->ops->host_stop)
5037 host_set->ops->host_stop(host_set);
5038
5039 kfree(host_set);
5040 }
5041
5042 /**
5043 * ata_scsi_release - SCSI layer callback hook for host unload
5044 * @host: libata host to be unloaded
5045 *
5046 * Performs all duties necessary to shut down a libata port...
5047 * Kill port kthread, disable port, and release resources.
5048 *
5049 * LOCKING:
5050 * Inherited from SCSI layer.
5051 *
5052 * RETURNS:
5053 * One.
5054 */
5055
5056 int ata_scsi_release(struct Scsi_Host *host)
5057 {
5058 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
5059 int i;
5060
5061 DPRINTK("ENTER\n");
5062
5063 ap->ops->port_disable(ap);
5064 ata_host_remove(ap, 0);
5065 for (i = 0; i < ATA_MAX_DEVICES; i++)
5066 kfree(ap->device[i].id);
5067
5068 DPRINTK("EXIT\n");
5069 return 1;
5070 }
5071
5072 /**
5073 * ata_std_ports - initialize ioaddr with standard port offsets.
5074 * @ioaddr: IO address structure to be initialized
5075 *
5076 * Utility function which initializes data_addr, error_addr,
5077 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5078 * device_addr, status_addr, and command_addr to standard offsets
5079 * relative to cmd_addr.
5080 *
5081 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5082 */
5083
5084 void ata_std_ports(struct ata_ioports *ioaddr)
5085 {
5086 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5087 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5088 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5089 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5090 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5091 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5092 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5093 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5094 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5095 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5096 }
5097
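/* Sketch: for a legacy primary channel the LLD fills in just the two
 * base addresses and lets this helper derive the rest:
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */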
5098
5099 #ifdef CONFIG_PCI
5100
5101 void ata_pci_host_stop (struct ata_host_set *host_set)
5102 {
5103 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5104
5105 pci_iounmap(pdev, host_set->mmio_base);
5106 }
5107
5108 /**
5109 * ata_pci_remove_one - PCI layer callback for device removal
5110 * @pdev: PCI device that was removed
5111 *
5112 * PCI layer indicates to libata via this hook that
5113 * hot-unplug or module unload event has occurred.
5114 * Handle this by unregistering all objects associated
5115 * with this PCI device. Free those objects. Then finally
5116 * release PCI resources and disable device.
5117 *
5118 * LOCKING:
5119 * Inherited from PCI layer (may sleep).
5120 */
5121
5122 void ata_pci_remove_one (struct pci_dev *pdev)
5123 {
5124 struct device *dev = pci_dev_to_dev(pdev);
5125 struct ata_host_set *host_set = dev_get_drvdata(dev);
5126
5127 ata_host_set_remove(host_set);
5128 pci_release_regions(pdev);
5129 pci_disable_device(pdev);
5130 dev_set_drvdata(dev, NULL);
5131 }
5132
5133 /* move to PCI subsystem */
5134 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5135 {
5136 unsigned long tmp = 0;
5137
5138 switch (bits->width) {
5139 case 1: {
5140 u8 tmp8 = 0;
5141 pci_read_config_byte(pdev, bits->reg, &tmp8);
5142 tmp = tmp8;
5143 break;
5144 }
5145 case 2: {
5146 u16 tmp16 = 0;
5147 pci_read_config_word(pdev, bits->reg, &tmp16);
5148 tmp = tmp16;
5149 break;
5150 }
5151 case 4: {
5152 u32 tmp32 = 0;
5153 pci_read_config_dword(pdev, bits->reg, &tmp32);
5154 tmp = tmp32;
5155 break;
5156 }
5157
5158 default:
5159 return -EINVAL;
5160 }
5161
5162 tmp &= bits->mask;
5163
5164 return (tmp == bits->val) ? 1 : 0;
5165 }
5166
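/* Sketch (register, mask and value are hypothetical; real drivers use
 * the bits documented for their chipset): skip a port whose enable bit
 * is clear in PCI config space:
 *
 *	static const struct pci_bits foo_port_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };	(reg, width in bytes, mask, val)
 *
 *	if (!pci_test_config_bits(pdev, &foo_port_enable_bits))
 *		return -ENODEV;
 */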
5167 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5168 {
5169 pci_save_state(pdev);
5170 pci_disable_device(pdev);
5171 pci_set_power_state(pdev, PCI_D3hot);
5172 return 0;
5173 }
5174
5175 int ata_pci_device_resume(struct pci_dev *pdev)
5176 {
5177 pci_set_power_state(pdev, PCI_D0);
5178 pci_restore_state(pdev);
5179 pci_enable_device(pdev);
5180 pci_set_master(pdev);
5181 return 0;
5182 }
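/* Sketch: the two helpers above have the right prototypes to be wired
 * straight into a PCI driver (the "foo" names are hypothetical):
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */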
5183 #endif /* CONFIG_PCI */
5184
5185
5186 static int __init ata_init(void)
5187 {
5188 ata_wq = create_workqueue("ata");
5189 if (!ata_wq)
5190 return -ENOMEM;
5191
5192 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5193 return 0;
5194 }
5195
5196 static void __exit ata_exit(void)
5197 {
5198 destroy_workqueue(ata_wq);
5199 }
5200
5201 module_init(ata_init);
5202 module_exit(ata_exit);
5203
5204 static unsigned long ratelimit_time;
5205 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5206
5207 int ata_ratelimit(void)
5208 {
5209 int rc;
5210 unsigned long flags;
5211
5212 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5213
5214 if (time_after(jiffies, ratelimit_time)) {
5215 rc = 1;
5216 ratelimit_time = jiffies + (HZ/5);
5217 } else
5218 rc = 0;
5219
5220 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5221
5222 return rc;
5223 }
5224
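/* Typical use (as in several SATA LLDs): throttle error messages from
 * interrupt context to roughly five per second:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */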
5225 /*
5226 * libata is essentially a library of internal helper functions for
5227 * low-level ATA host controller drivers. As such, the API/ABI is
5228 * likely to change as new drivers are added and updated.
5229 * Do not depend on ABI/API stability.
5230 */
5231
5232 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5233 EXPORT_SYMBOL_GPL(ata_std_ports);
5234 EXPORT_SYMBOL_GPL(ata_device_add);
5235 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5236 EXPORT_SYMBOL_GPL(ata_sg_init);
5237 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5238 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5239 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5240 EXPORT_SYMBOL_GPL(ata_tf_load);
5241 EXPORT_SYMBOL_GPL(ata_tf_read);
5242 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5243 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5244 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5245 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5246 EXPORT_SYMBOL_GPL(ata_check_status);
5247 EXPORT_SYMBOL_GPL(ata_altstatus);
5248 EXPORT_SYMBOL_GPL(ata_exec_command);
5249 EXPORT_SYMBOL_GPL(ata_port_start);
5250 EXPORT_SYMBOL_GPL(ata_port_stop);
5251 EXPORT_SYMBOL_GPL(ata_host_stop);
5252 EXPORT_SYMBOL_GPL(ata_interrupt);
5253 EXPORT_SYMBOL_GPL(ata_qc_prep);
5254 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5255 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5256 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5257 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5258 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5259 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5260 EXPORT_SYMBOL_GPL(ata_port_probe);
5261 EXPORT_SYMBOL_GPL(sata_phy_reset);
5262 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5263 EXPORT_SYMBOL_GPL(ata_bus_reset);
5264 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5265 EXPORT_SYMBOL_GPL(ata_std_softreset);
5266 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5267 EXPORT_SYMBOL_GPL(ata_std_postreset);
5268 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5269 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5270 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5271 EXPORT_SYMBOL_GPL(ata_dev_classify);
5272 EXPORT_SYMBOL_GPL(ata_dev_pair);
5273 EXPORT_SYMBOL_GPL(ata_port_disable);
5274 EXPORT_SYMBOL_GPL(ata_ratelimit);
5275 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5276 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5277 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5278 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5279 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5280 EXPORT_SYMBOL_GPL(ata_scsi_release);
5281 EXPORT_SYMBOL_GPL(ata_host_intr);
5282 EXPORT_SYMBOL_GPL(ata_id_string);
5283 EXPORT_SYMBOL_GPL(ata_id_c_string);
5284 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5285
5286 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5287 EXPORT_SYMBOL_GPL(ata_timing_compute);
5288 EXPORT_SYMBOL_GPL(ata_timing_merge);
5289
5290 #ifdef CONFIG_PCI
5291 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5292 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5293 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5294 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5295 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5296 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5297 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5298 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5299 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5300 #endif /* CONFIG_PCI */
5301
5302 EXPORT_SYMBOL_GPL(ata_device_suspend);
5303 EXPORT_SYMBOL_GPL(ata_device_resume);
5304 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5305 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5306
5307 EXPORT_SYMBOL_GPL(ata_scsi_error);
5308 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5309 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5310 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);