/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_dev_init_params(struct ata_port *ap,
                                        struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static void ata_pio_error(struct ata_port *ap);
static unsigned int ata_dev_xfermask(struct ata_port *ap,
                                     struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;                    /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7);  /* Port multiplier number,
                                             bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}

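/*
 * Illustrative sketch (not part of the driver): a caller that has
 * filled an ata_taskfile can serialize it into the 20-byte Register -
 * Host to Device FIS that a SATA controller transmits.  The command
 * used here is only an example.
 *
 *	struct ata_taskfile tf;
 *	u8 fis[20];
 *
 *	ata_tf_init(ap, &tf, 0);
 *	tf.command = ATA_CMD_ID_ATA;
 *	ata_tf_to_fis(&tf, fis, 0);
 *
 * afterwards fis[0] == 0x27 and fis[2] == tf.command.
 */
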
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command = fis[2];   /* status */
        tf->feature = fis[3];   /* error */

        tf->lbal = fis[4];
        tf->lbam = fis[5];
        tf->lbah = fis[6];
        tf->device = fis[7];

        tf->hob_lbal = fis[8];
        tf->hob_lbam = fis[9];
        tf->hob_lbah = fis[10];

        tf->nsect = fis[12];
        tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @qc: command to examine and configure
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct ata_device *dev = qc->dev;
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}

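/*
 * How the ata_rw_cmds[] lookup above decodes, for two hypothetical
 * cases: an LBA48 FUA DMA write indexes 16 (dma) + 4 (fua) + 2 (lba48)
 * + 1 (write) == 23, i.e. ATA_CMD_WRITE_FUA_EXT; a plain PIO read on a
 * device with multi_count set indexes 0 + 0 + 0 + 0 == 0, i.e.
 * ATA_CMD_READ_MULTI.  Zero entries are invalid combinations and make
 * ata_rwcmd_protocol() return -1.
 */
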
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
               ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
               ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

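/*
 * Example (values hypothetical): a device doing PIO0-4, MWDMA0-2 and
 * UDMA0-5 packs into one word, each field at its ATA_SHIFT_* position:
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 */
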
static const struct ata_xfer_ent {
        int shift, bits;        /* signed, so the { -1, } sentinel below
                                   terminates the ent->shift >= 0 loops */
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

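/*
 * For instance (illustrative): a mask whose highest set bit is UDMA5
 * translates back to the corresponding mode number:
 *
 *	u8 mode = ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 5));
 *
 * mode is now XFER_UDMA_5; ata_xfer_mode2mask() below is the inverse.
 */
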
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
                                   unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        outb(0xaa, ioaddr->nsect_addr);
        outb(0x55, ioaddr->lbal_addr);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        nsect = inb(ioaddr->nsect_addr);
        lbal = inb(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
                                    unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
        writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        nsect = readb((void __iomem *) ioaddr->nsect_addr);
        lbal = readb((void __iomem *) ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
                               unsigned int device)
{
        if (ap->flags & ATA_FLAG_MMIO)
                return ata_mmio_devchk(ap, device);
        return ata_pio_devchk(ap, device);
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}

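/*
 * Signature quick reference for the tests above (values from the
 * ATA/ATAPI spec): LBA mid/high of 00/00 or 3c/c3 mean ATA, 14/eb or
 * 69/96 mean ATAPI.  E.g. a taskfile read back with tf.lbam == 0x14
 * and tf.lbah == 0xeb classifies as ATA_DEV_ATAPI.
 */
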
/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags */
        if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return.  Must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return.  Must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}

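/*
 * Typical use, mirroring what libata's SCSI simulator does with the
 * product string: ATA_ID_PROD_OFS is the word offset of the model
 * string, 40 bytes long, so a 41-byte buffer leaves room for the NUL:
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD_OFS, sizeof(model));
 */
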
static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
        } else {
                outb(tmp, ap->ioaddr.device_addr);
        }
        ata_pause(ap);  /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        VPRINTK("ENTER, ata%u: device %u, wait %u\n",
                ap->id, device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x\n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x\n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case.  Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum.  Turn it into
                 * a mask.
                 */
                pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

                /* But wait.. there's more.  Design your standards by
                 * committee and you too can get a free iordy field to
                 * process.  However it's the speeds, not the modes, that
                 * are supported...  Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user (low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_WORK(&ap->port_task, fn, data);

        if (!delay)
                rc = queue_work(ata_wq, &ap->port_task);
        else
                rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

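/*
 * Usage sketch: a low level driver could (re)arm a polling task like
 * this, where my_poll_fn is a hypothetical work function.  As noted
 * above, only one such task may be in flight per port.
 *
 *	ata_port_queue_task(ap, my_poll_fn, ap, msecs_to_jiffies(100));
 */
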
/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                DPRINTK("flush #2\n");
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        qc->ap->ops->tf_read(qc->ap, &qc->tf);
        complete(waiting);
}

/**
 * ata_exec_internal - execute libata internal command
 * @ap: Port to which the command is sent
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It is the caller's duty
 * to clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
                  struct ata_taskfile *tf,
                  int dma_dir, void *buf, unsigned int buflen)
{
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        DECLARE_COMPLETION(wait);
        unsigned long flags;
        unsigned int err_mask;

        spin_lock_irqsave(&ap->host_set->lock, flags);

        qc = ata_qc_new_init(ap, dev);
        BUG_ON(qc == NULL);

        qc->tf = *tf;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                ata_sg_init_one(qc, buf, buflen);
                qc->nsect = buflen / ATA_SECT_SIZE;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        qc->err_mask = ata_qc_issue(qc);
        if (qc->err_mask)
                ata_qc_complete(qc);

        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
                ata_port_flush_task(ap);

                spin_lock_irqsave(&ap->host_set->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * again.  If completion irq occurs after here but
                 * before the caller cleans up, it will result in a
                 * spurious interrupt.  We can live with that.
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask = AC_ERR_TIMEOUT;
                        ata_qc_complete(qc);
                        printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
                               ap->id, command);
                }

                spin_unlock_irqrestore(&ap->host_set->lock, flags);
        }

        *tf = qc->tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);

        return err_mask;
}

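/*
 * Example caller pattern -- essentially what ata_dev_read_id() below
 * does: build a taskfile, issue it synchronously, check err_mask.
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(ap, &tf, dev->devno);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
 *				     id, sizeof(id[0]) * ATA_ID_WORDS);
 */
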
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY.  Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        int pio;
        int speed = adev->pio_mode - XFER_PIO_0;

        if (speed < 2)
                return 0;
        if (speed > 2)
                return 1;

        /* If we have no drive specific rule, then PIO 2 is non IORDY */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows without IORDY? */
                if (pio) {
                        /* This is cycle times, not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 1;
                        return 0;
                }
        }
        return 0;
}

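/*
 * Worked example of the cycle-time test above: a drive whose word 68
 * (ATA_ID_EIDE_PIO) reports a 300ns minimum cycle without IORDY is
 * slower than the 240ns PIO2 cycle, so 300 > 240 and the function
 * returns 1 (IORDY required to run it at PIO2).
 */
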
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @ap: port on which target device resides
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @post_reset: is this read ID post-reset?
 * @p_id: read IDENTIFY page (newly allocated)
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also takes care of EDD signature
 * misreporting (to be removed once EDD support is gone) and
 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
                           unsigned int *p_class, int post_reset, u16 **p_id)
{
        unsigned int class = *p_class;
        unsigned int using_edd;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        u16 *id;
        const char *reason;
        int rc;

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        if (ap->ops->probe_reset ||
            ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
                using_edd = 0;
        else
                using_edd = 1;

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

        id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
        if (id == NULL) {
                rc = -ENOMEM;
                reason = "out of memory";
                goto err_out;
        }

 retry:
        ata_tf_init(ap, &tf, dev->devno);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);

        if (err_mask) {
                rc = -EIO;
                reason = "I/O error";

                if (err_mask & ~AC_ERR_DEV)
                        goto err_out;

                /*
                 * arg!  EDD works for all test cases, but seems to return
                 * the ATA signature for some ATAPI devices.  Until the
                 * reason for this is found and fixed, we fix up the mess
                 * here.  If IDENTIFY DEVICE returns command aborted
                 * (as ATAPI devices do), then we issue an
                 * IDENTIFY PACKET DEVICE.
                 *
                 * ATA software reset (SRST, the default) does not appear
                 * to have this problem.
                 */
                if ((using_edd) && (class == ATA_DEV_ATA)) {
                        u8 err = tf.feature;
                        if (err & ATA_ABORTED) {
                                class = ATA_DEV_ATAPI;
                                goto retry;
                        }
                }
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
                rc = -EINVAL;
                reason = "device reports illegal type";
                goto err_out;
        }

        if (post_reset && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(ap, dev);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed.  reread the identify device info.
                         */
                        post_reset = 0;
                        goto retry;
                }
        }

        *p_class = class;
        *p_id = id;
        return 0;

 err_out:
        printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
               ap->id, dev->devno, reason);
        kfree(id);
        return rc;
}

static inline u8 ata_dev_knobble(const struct ata_port *ap,
                                 struct ata_device *dev)
{
        return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @ap: Port on which target device resides
 * @dev: Target device to configure
 * @print_info: Enable device info printout
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
                             int print_info)
{
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        int i, rc;

        if (!ata_dev_present(dev)) {
                DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
                        ap->id, dev->devno);
                return 0;
        }

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        /* print device capabilities */
        if (print_info)
                printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
                       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
                       ap->id, dev->devno, id[49], id[82], id[83],
                       id[84], id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags = 0;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                dev->n_sectors = ata_id_n_sectors(id);

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                printk(KERN_INFO "ata%u: dev %u ATA-%d, "
                                       "max %s, %Lu sectors: %s\n",
                                       ap->id, dev->devno,
                                       ata_id_major_version(id),
                                       ata_mode_string(xfer_mask),
                                       (unsigned long long)dev->n_sectors,
                                       lba_desc);
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders = id[1];
                        dev->heads     = id[3];
                        dev->sectors   = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                printk(KERN_INFO "ata%u: dev %u ATA-%d, "
                                       "max %s, %Lu sectors: CHS %u/%u/%u\n",
                                       ap->id, dev->devno,
                                       ata_id_major_version(id),
                                       ata_mode_string(xfer_mask),
                                       (unsigned long long)dev->n_sectors,
                                       dev->cylinders, dev->heads, dev->sectors);
                }

                if (dev->id[59] & 0x100) {
                        dev->multi_count = dev->id[59] & 0xff;
                        DPRINTK("ata%u: dev %u multi count %u\n",
                                ap->id, dev->devno, dev->multi_count);
                }

        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                if (ata_id_cdb_intr(dev->id))
                        dev->flags |= ATA_DFLAG_CDB_INTR;

                /* print device info to dmesg */
                if (print_info)
                        printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
                               ap->id, dev->devno, ata_mode_string(xfer_mask));
        }

        ap->host->max_cmd_len = 0;
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->host->max_cmd_len = max_t(unsigned int,
                                              ap->host->max_cmd_len,
                                              ap->device[i].cdb_len);

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(ap, dev)) {
                if (print_info)
                        printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
                               ap->id, dev->devno);
                ap->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ap->ops->dev_config)
                ap->ops->dev_config(ap, dev);

        DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
        return 0;

err_out_nosup:
        printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
               ap->id, dev->devno);
        DPRINTK("EXIT, err\n");
        return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        unsigned int i, rc, found = 0;

        ata_port_probe(ap);

        /* reset and determine device classes */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                classes[i] = ATA_DEV_UNKNOWN;

        if (ap->ops->probe_reset) {
                rc = ap->ops->probe_reset(ap, classes);
                if (rc) {
                        printk(KERN_ERR "ata%u: reset failed (errno=%d)\n",
                               ap->id, rc);
                        return rc;
                }
        } else {
                ap->ops->phy_reset(ap);

                if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
                        for (i = 0; i < ATA_MAX_DEVICES; i++)
                                classes[i] = ap->device[i].class;

                ata_port_probe(ap);
        }

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (classes[i] == ATA_DEV_UNKNOWN)
                        classes[i] = ATA_DEV_NONE;

        /* read IDENTIFY page and configure devices */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                dev->class = classes[i];

                if (!ata_dev_present(dev))
                        continue;

                WARN_ON(dev->id != NULL);
                if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
                        dev->class = ATA_DEV_NONE;
                        continue;
                }

                if (ata_dev_configure(ap, dev, 1)) {
                        dev->class++;   /* disable device */
                        continue;
                }

                found = 1;
        }

        if (!found)
                goto err_out_disable;

        ata_set_mode(ap);
        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                goto err_out_disable;

        return 0;

err_out_disable:
        ap->ops->port_disable(ap);
        return -1;
}

/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
        ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
        u32 sstatus, tmp;
        const char *speed;

        if (!ap->ops->scr_read)
                return;

        sstatus = scr_read(ap, SCR_STATUS);

        if (sata_dev_present(ap)) {
                tmp = (sstatus >> 4) & 0xf;
                if (tmp & (1 << 0))
                        speed = "1.5";
                else if (tmp & (1 << 1))
                        speed = "3.0";
                else
                        speed = "<unknown>";
                printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
                       ap->id, speed, sstatus);
        } else {
                printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
                       ap->id, sstatus);
        }
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
        u32 sstatus;
        unsigned long timeout = jiffies + (HZ * 5);

        if (ap->flags & ATA_FLAG_SATA_RESET) {
                /* issue phy wake/reset */
                scr_write_flush(ap, SCR_CONTROL, 0x301);
                /* Couldn't find anything in SATA I/II specs, but
                 * AHCI-1.1 10.4.2 says at least 1 ms. */
                mdelay(1);
        }
        scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

        /* wait for phy to become ready, if necessary */
        do {
                msleep(200);
                sstatus = scr_read(ap, SCR_STATUS);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        /* print link status */
        sata_print_link_status(ap);

        /* TODO: phy layer with polling, timeouts, etc. */
        if (sata_dev_present(ap))
                ata_port_probe(ap);
        else
                ata_port_disable(ap);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;

        if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                ata_port_disable(ap);
                return;
        }

        ap->cbl = ATA_CBL_SATA;
}

/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
        __sata_phy_reset(ap);
        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;
        ata_bus_reset(ap);
}

/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
        ap->device[0].class = ATA_DEV_NONE;
        ap->device[1].class = ATA_DEV_NONE;
        ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

        { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
        { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
        { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
        { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

        { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
        { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
        { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*      { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

        { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
        { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
        { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

        { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
        { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
        { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*      { XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
        { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
        { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

        { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
        { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
        { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*      { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

        { 0xFF }
};

#define ENOUGH(v,unit)  (((v)-1)/(unit)+1)
#define EZ(v,unit)      ((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
        q->setup   = EZ(t->setup   * 1000,  T);
        q->act8b   = EZ(t->act8b   * 1000,  T);
        q->rec8b   = EZ(t->rec8b   * 1000,  T);
        q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
        q->active  = EZ(t->active  * 1000,  T);
        q->recover = EZ(t->recover * 1000,  T);
        q->cycle   = EZ(t->cycle   * 1000,  T);
        q->udma    = EZ(t->udma    * 1000, UT);
}

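/*
 * Quantization example, assuming the caller passes the bus clock
 * period T in picoseconds (e.g. T == 30000 for a ~33 MHz clock): a
 * 70ns setup time becomes EZ(70 * 1000, 30000) == ENOUGH(70000, 30000)
 * == (70000 - 1) / 30000 + 1 == 3 clocks.  Counts always round up, so
 * the programmed timing is never faster than requested.
 */
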
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
                      struct ata_timing *m, unsigned int what)
{
        if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
        if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
        if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
        if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
        if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
        if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
        if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
        if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
        const struct ata_timing *t;

        for (t = ata_timing; t->mode != speed; t++)
                if (t->mode == 0xFF)
                        return NULL;
        return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
{
        const struct ata_timing *s;
        struct ata_timing p;

        /*
         * Find the mode.
         */

        if (!(s = ata_timing_find_mode(speed)))
                return -EINVAL;

        memcpy(t, s, sizeof(*s));

        /*
         * If the drive is an EIDE drive, it can tell us it needs extended
         * PIO/MW_DMA cycle timing.
         */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
                memset(&p, 0, sizeof(p));
                if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
                        if (speed <= XFER_PIO_2)
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
                        else
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
                } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
                        p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
                }
                ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
        }

        /*
         * Convert the timing to bus clock counts.
         */

        ata_timing_quantize(t, t, T, UT);

        /*
         * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
         * S.M.A.R.T. and some other commands.  We have to ensure that the
         * DMA cycle timing is slower than or equal to the fastest PIO timing.
         */

        if (speed > XFER_PIO_4) {
                ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
                ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
        }

        /*
         * Lengthen active & recovery time so that cycle time is correct.
         */

        if (t->act8b + t->rec8b < t->cyc8b) {
                t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
                t->rec8b = t->cyc8b - t->act8b;
        }

        if (t->active + t->recover < t->cycle) {
                t->active += (t->cycle - (t->active + t->recover)) / 2;
                t->recover = t->cycle - t->active;
        }

        return 0;
}

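/*
 * Typical call from a timing-aware driver (values hypothetical):
 * quantize MWDMA2 timings against a ~33 MHz bus, i.e. a 30000ps
 * clock period for both the command and UDMA clocks:
 *
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, XFER_MW_DMA_2, &t, 30000, 30000))
 *		return;		(unsupported mode)
 */
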
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
        if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
                return;

        if (dev->xfer_shift == ATA_SHIFT_PIO)
                dev->flags |= ATA_DFLAG_PIO;

        ata_dev_set_xfermode(ap, dev);

        if (ata_dev_revalidate(ap, dev, 0)) {
                printk(KERN_ERR "ata%u: failed to revalidate after set "
                       "xfermode, disabled\n", ap->id);
                ata_port_disable(ap);
        }

        DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
                dev->xfer_shift, (int)dev->xfer_mode);

        printk(KERN_INFO "ata%u: dev %u configured for %s\n",
               ap->id, dev->devno,
               ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
}

static int ata_host_set_pio(struct ata_port *ap)
{
        int i;

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (!ata_dev_present(dev))
                        continue;

                if (!dev->pio_mode) {
                        printk(KERN_WARNING "ata%u: no PIO support for device %d.\n",
                               ap->id, i);
                        return -1;
                }

                dev->xfer_mode = dev->pio_mode;
                dev->xfer_shift = ATA_SHIFT_PIO;
                if (ap->ops->set_piomode)
                        ap->ops->set_piomode(ap, dev);
        }

        return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
        int i;

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (!ata_dev_present(dev) || !dev->dma_mode)
                        continue;

                dev->xfer_mode = dev->dma_mode;
                dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
                if (ap->ops->set_dmamode)
                        ap->ops->set_dmamode(ap, dev);
        }
}

/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 *
 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
        int i, rc;

        /* step 1: calculate xfer_mask */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];
                unsigned int xfer_mask;

                if (!ata_dev_present(dev))
                        continue;

                xfer_mask = ata_dev_xfermask(ap, dev);

                dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
                dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
                                                                ATA_MASK_UDMA));
        }

        /* step 2: always set host PIO timings */
        rc = ata_host_set_pio(ap);
        if (rc)
                goto err_out;

        /* step 3: set host DMA timings */
        ata_host_set_dma(ap);

        /* step 4: update devices' xfer mode */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ata_dev_set_mode(ap, &ap->device[i]);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;

        if (ap->ops->post_set_mode)
                ap->ops->post_set_mode(ap);

        return;

err_out:
        ata_port_disable(ap);
}

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
                                  const struct ata_taskfile *tf)
{
        ap->ops->tf_load(ap, tf);
        ap->ops->exec_command(ap, tf);
}

/**
 * ata_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout
 * @tmout: overall timeout
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING: None.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
                             unsigned long tmout_pat, unsigned long tmout)
{
        unsigned long timer_start, timeout;
        u8 status;

        status = ata_busy_wait(ap, ATA_BUSY, 300);
        timer_start = jiffies;
        timeout = timer_start + tmout_pat;
        while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
                msleep(50);
                status = ata_busy_wait(ap, ATA_BUSY, 3);
        }

        if (status & ATA_BUSY)
                printk(KERN_WARNING "ata%u is slow to respond, "
                       "please be patient\n", ap->id);

        timeout = timer_start + tmout;
        while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
                msleep(50);
                status = ata_chk_status(ap);
        }

        if (status & ATA_BUSY) {
                printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
                       ap->id, tmout / HZ);
                return 1;
        }

        return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int dev0 = devmask & (1 << 0);
        unsigned int dev1 = devmask & (1 << 1);
        unsigned long timeout;

        /* if device 0 was found in ata_devchk, wait for its
         * BSY bit to clear
         */
        if (dev0)
                ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        /* if device 1 was found in ata_devchk, wait for
         * register access, then wait for BSY to clear
         */
        timeout = jiffies + ATA_TMOUT_BOOT;
        while (dev1) {
                u8 nsect, lbal;

                ap->ops->dev_select(ap, 1);
                if (ap->flags & ATA_FLAG_MMIO) {
                        nsect = readb((void __iomem *) ioaddr->nsect_addr);
                        lbal = readb((void __iomem *) ioaddr->lbal_addr);
                } else {
                        nsect = inb(ioaddr->nsect_addr);
                        lbal = inb(ioaddr->lbal_addr);
                }
                if ((nsect == 1) && (lbal == 1))
                        break;
                if (time_after(jiffies, timeout)) {
                        dev1 = 0;
                        break;
                }
                msleep(50);     /* give drive a breather */
        }
        if (dev1)
                ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        /* is all this really necessary? */
        ap->ops->dev_select(ap, 0);
        if (dev1)
                ap->ops->dev_select(ap, 1);
        if (dev0)
                ap->ops->dev_select(ap, 0);
}

/**
 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 * @ap: Port to reset and probe
 *
 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 * probe the bus.  Not often used these days.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 * Obtains host_set lock.
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
        struct ata_taskfile tf;
        unsigned long flags;

        /* set up execute-device-diag (bus reset) taskfile */
        /* also, take interrupts to a known state (disabled) */
        DPRINTK("execute-device-diag\n");
        ata_tf_init(ap, &tf, 0);
        tf.ctl |= ATA_NIEN;
        tf.command = ATA_CMD_EDD;
        tf.protocol = ATA_PROT_NODATA;

        /* do bus reset */
        spin_lock_irqsave(&ap->host_set->lock, flags);
        ata_tf_to_host(ap, &tf);
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        /* spec says at least 2ms.  but who knows with those
         * crazy ATAPI devices...
         */
        msleep(150);

        return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
                                      unsigned int devmask)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        DPRINTK("ata%u: bus reset via SRST\n", ap->id);

        /* software reset.  causes dev0 to be selected */
        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
                udelay(20);     /* FIXME: flush */
                writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
                udelay(20);     /* FIXME: flush */
                writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
        } else {
                outb(ap->ctl, ioaddr->ctl_addr);
                udelay(10);
                outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
                udelay(10);
                outb(ap->ctl, ioaddr->ctl_addr);
        }

        /* spec mandates ">= 2ms" before checking status.
         * We wait 150ms, because that was the magic delay used for
         * ATAPI devices in Hale Landis's ATADRVR, for the period of time
         * between when the ATA command register is written, and then
         * status is checked.  Because waiting for "a while" before
         * checking status is fine, post SRST, we perform this magic
         * delay here as well.
         *
         * Old drivers/ide uses the 2mS rule and then waits for ready
         */
        msleep(150);

        /* Before we perform post reset processing we want to see if
         * the bus shows 0xFF because the odd clown forgets the D7
         * pulldown resistor.
         */
        if (ata_check_status(ap) == 0xFF)
                return 1;       /* Positive is failure for some reason */

        ata_bus_post_reset(ap, devmask);

        return 0;
}

/**
 * ata_bus_reset - reset host port and associated ATA channel
 * @ap: port to reset
 *
 * This is typically the first time we actually start issuing
 * commands to the ATA channel.  We wait for BSY to clear, then
 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 * result.  Determine what devices, if any, are on the channel
 * by looking at the device 0/1 error register.  Look at the signature
 * stored in each device's taskfile registers, to determine if
 * the device is ATA or ATAPI.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 * Obtains host_set lock.
 *
 * SIDE EFFECTS:
 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
        u8 err;
        unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

        DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

        /* determine if device 0/1 are present */
        if (ap->flags & ATA_FLAG_SATA_RESET)
                dev0 = 1;
        else {
                dev0 = ata_devchk(ap, 0);
                if (slave_possible)
                        dev1 = ata_devchk(ap, 1);
        }

        if (dev0)
                devmask |= (1 << 0);
        if (dev1)
                devmask |= (1 << 1);

        /* select device 0 again */
        ap->ops->dev_select(ap, 0);

        /* issue bus reset */
        if (ap->flags & ATA_FLAG_SRST)
                rc = ata_bus_softreset(ap, devmask);
        else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
                /* set up device control */
                if (ap->flags & ATA_FLAG_MMIO)
                        writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
                else
                        outb(ap->ctl, ioaddr->ctl_addr);
                rc = ata_bus_edd(ap);
        }

        if (rc)
                goto err_out;

        /*
         * determine by signature whether we have ATA or ATAPI devices
         */
        ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
        if ((slave_possible) && (err != 0x81))
                ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

        /* re-enable interrupts */
        if (ap->ioaddr.ctl_addr)        /* FIXME: hack. create a hook instead */
                ata_irq_on(ap);

        /* is double-select really necessary? */
        if (ap->device[1].class != ATA_DEV_NONE)
                ap->ops->dev_select(ap, 1);
        if (ap->device[0].class != ATA_DEV_NONE)
                ap->ops->dev_select(ap, 0);

        /* if no devices were detected, disable this port */
        if ((ap->device[0].class == ATA_DEV_NONE) &&
            (ap->device[1].class == ATA_DEV_NONE))
                goto err_out;

        if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
                /* set up device control for ATA_FLAG_SATA_RESET */
                if (ap->flags & ATA_FLAG_MMIO)
                        writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
                else
                        outb(ap->ctl, ioaddr->ctl_addr);
        }

        DPRINTK("EXIT\n");
        return;

err_out:
        printk(KERN_ERR "ata%u: disabling port\n", ap->id);
        ap->ops->port_disable(ap);

        DPRINTK("EXIT\n");
}

2125 static int sata_phy_resume(struct ata_port *ap)
2126 {
2127 unsigned long timeout = jiffies + (HZ * 5);
2128 u32 sstatus;
2129
2130 scr_write_flush(ap, SCR_CONTROL, 0x300);
2131
2132 /* Wait for phy to become ready, if necessary. */
2133 do {
2134 msleep(200);
2135 sstatus = scr_read(ap, SCR_STATUS);
2136 if ((sstatus & 0xf) != 1)
2137 return 0;
2138 } while (time_before(jiffies, timeout));
2139
2140 return -1;
2141 }
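
/*
 * A note on the DET test above: the SStatus DET field (bits 3:0)
 * encodes, per the SATA spec, 0x0 = no device detected, 0x1 = device
 * presence detected but Phy communication not yet established,
 * 0x3 = device present and Phy online, 0x4 = Phy in offline mode.
 * sata_phy_resume() therefore keeps polling only while DET reads 0x1
 * (present, link not up yet) and treats any other value as "nothing
 * further to wait for".
 */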
2142
2143 /**
2144 * ata_std_probeinit - initialize probing
2145 * @ap: port to be probed
2146 *
2147 * @ap is about to be probed. Initialize it. This function is
2148 * to be used as standard callback for ata_drive_probe_reset().
2149 *
2150	 * NOTE!!! Do not use this function as probeinit if a low level
2151	 * driver implements only hardreset.  Just pass NULL as probeinit
2152	 * in that case.  Using this function is probably okay, but doing
2153	 * so makes the reset sequence different from the original
2154	 * ->phy_reset implementation and makes Jeff nervous.  :-P
2155 */
2156	void ata_std_probeinit(struct ata_port *ap)
2157 {
2158 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2159 sata_phy_resume(ap);
2160 if (sata_dev_present(ap))
2161 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2162 }
2163 }
2164
2165 /**
2166 * ata_std_softreset - reset host port via ATA SRST
2167 * @ap: port to reset
2168 * @verbose: fail verbosely
2169 * @classes: resulting classes of attached devices
2170 *
2171 * Reset host port using ATA SRST. This function is to be used
2172 * as standard callback for ata_drive_*_reset() functions.
2173 *
2174 * LOCKING:
2175 * Kernel thread context (may sleep)
2176 *
2177 * RETURNS:
2178 * 0 on success, -errno otherwise.
2179 */
2180 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2181 {
2182 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2183 unsigned int devmask = 0, err_mask;
2184 u8 err;
2185
2186 DPRINTK("ENTER\n");
2187
2188 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2189 classes[0] = ATA_DEV_NONE;
2190 goto out;
2191 }
2192
2193 /* determine if device 0/1 are present */
2194 if (ata_devchk(ap, 0))
2195 devmask |= (1 << 0);
2196 if (slave_possible && ata_devchk(ap, 1))
2197 devmask |= (1 << 1);
2198
2199 /* select device 0 again */
2200 ap->ops->dev_select(ap, 0);
2201
2202 /* issue bus reset */
2203 DPRINTK("about to softreset, devmask=%x\n", devmask);
2204 err_mask = ata_bus_softreset(ap, devmask);
2205 if (err_mask) {
2206 if (verbose)
2207 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2208 ap->id, err_mask);
2209 else
2210 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2211 err_mask);
2212 return -EIO;
2213 }
2214
2215 /* determine by signature whether we have ATA or ATAPI devices */
2216 classes[0] = ata_dev_try_classify(ap, 0, &err);
2217 if (slave_possible && err != 0x81)
2218 classes[1] = ata_dev_try_classify(ap, 1, &err);
2219
2220 out:
2221 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2222 return 0;
2223 }
2224
2225 /**
2226 * sata_std_hardreset - reset host port via SATA phy reset
2227 * @ap: port to reset
2228 * @verbose: fail verbosely
2229 * @class: resulting class of attached device
2230 *
2231 * SATA phy-reset host port using DET bits of SControl register.
2232 * This function is to be used as standard callback for
2233 * ata_drive_*_reset().
2234 *
2235 * LOCKING:
2236 * Kernel thread context (may sleep)
2237 *
2238 * RETURNS:
2239 * 0 on success, -errno otherwise.
2240 */
2241 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2242 {
2243 DPRINTK("ENTER\n");
2244
2245 /* Issue phy wake/reset */
2246 scr_write_flush(ap, SCR_CONTROL, 0x301);
2247
2248 /*
2249 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2250 * 10.4.2 says at least 1 ms.
2251 */
2252 msleep(1);
2253
2254 /* Bring phy back */
2255 sata_phy_resume(ap);
2256
2257 /* TODO: phy layer with polling, timeouts, etc. */
2258 if (!sata_dev_present(ap)) {
2259 *class = ATA_DEV_NONE;
2260 DPRINTK("EXIT, link offline\n");
2261 return 0;
2262 }
2263
2264 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2265 if (verbose)
2266 printk(KERN_ERR "ata%u: COMRESET failed "
2267 "(device not ready)\n", ap->id);
2268 else
2269 DPRINTK("EXIT, device not ready\n");
2270 return -EIO;
2271 }
2272
2273 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2274
2275 *class = ata_dev_try_classify(ap, 0, NULL);
2276
2277 DPRINTK("EXIT, class=%u\n", *class);
2278 return 0;
2279 }
2280
2281 /**
2282 * ata_std_postreset - standard postreset callback
2283 * @ap: the target ata_port
2284 * @classes: classes of attached devices
2285 *
2286 * This function is invoked after a successful reset. Note that
2287 * the device might have been reset more than once using
2288 * different reset methods before postreset is invoked.
2289 *
2290 * This function is to be used as standard callback for
2291 * ata_drive_*_reset().
2292 *
2293 * LOCKING:
2294 * Kernel thread context (may sleep)
2295 */
2296 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2297 {
2298 DPRINTK("ENTER\n");
2299
2300 /* set cable type if it isn't already set */
2301 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2302 ap->cbl = ATA_CBL_SATA;
2303
2304 /* print link status */
2305 if (ap->cbl == ATA_CBL_SATA)
2306 sata_print_link_status(ap);
2307
2308 /* re-enable interrupts */
2309 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2310 ata_irq_on(ap);
2311
2312 /* is double-select really necessary? */
2313	if (classes[1] != ATA_DEV_NONE)
2314	ap->ops->dev_select(ap, 1);
2315	if (classes[0] != ATA_DEV_NONE)
2316	ap->ops->dev_select(ap, 0);
2317
2318 /* bail out if no device is present */
2319 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2320 DPRINTK("EXIT, no device\n");
2321 return;
2322 }
2323
2324 /* set up device control */
2325 if (ap->ioaddr.ctl_addr) {
2326 if (ap->flags & ATA_FLAG_MMIO)
2327 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2328 else
2329 outb(ap->ctl, ap->ioaddr.ctl_addr);
2330 }
2331
2332 DPRINTK("EXIT\n");
2333 }
2334
2335 /**
2336 * ata_std_probe_reset - standard probe reset method
2337	 * @ap: port to perform probe-reset on
2338 * @classes: resulting classes of attached devices
2339 *
2340 * The stock off-the-shelf ->probe_reset method.
2341 *
2342 * LOCKING:
2343 * Kernel thread context (may sleep)
2344 *
2345 * RETURNS:
2346 * 0 on success, -errno otherwise.
2347 */
2348 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2349 {
2350 ata_reset_fn_t hardreset;
2351
2352 hardreset = NULL;
2353 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2354 hardreset = sata_std_hardreset;
2355
2356 return ata_drive_probe_reset(ap, ata_std_probeinit,
2357 ata_std_softreset, hardreset,
2358 ata_std_postreset, classes);
2359 }
2360
2361 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2362 ata_postreset_fn_t postreset,
2363 unsigned int *classes)
2364 {
2365 int i, rc;
2366
2367 for (i = 0; i < ATA_MAX_DEVICES; i++)
2368 classes[i] = ATA_DEV_UNKNOWN;
2369
2370 rc = reset(ap, 0, classes);
2371 if (rc)
2372 return rc;
2373
2374 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2375 * is complete and convert all ATA_DEV_UNKNOWN to
2376 * ATA_DEV_NONE.
2377 */
2378 for (i = 0; i < ATA_MAX_DEVICES; i++)
2379 if (classes[i] != ATA_DEV_UNKNOWN)
2380 break;
2381
2382 if (i < ATA_MAX_DEVICES)
2383 for (i = 0; i < ATA_MAX_DEVICES; i++)
2384 if (classes[i] == ATA_DEV_UNKNOWN)
2385 classes[i] = ATA_DEV_NONE;
2386
2387 if (postreset)
2388 postreset(ap, classes);
2389
2390 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2391 }
2392
2393 /**
2394 * ata_drive_probe_reset - Perform probe reset with given methods
2395 * @ap: port to reset
2396 * @probeinit: probeinit method (can be NULL)
2397 * @softreset: softreset method (can be NULL)
2398 * @hardreset: hardreset method (can be NULL)
2399 * @postreset: postreset method (can be NULL)
2400 * @classes: resulting classes of attached devices
2401 *
2402 * Reset the specified port and classify attached devices using
2403 * given methods. This function prefers softreset but tries all
2404 * possible reset sequences to reset and classify devices. This
2405 * function is intended to be used for constructing ->probe_reset
2406 * callback by low level drivers.
2407 *
2408	 * Reset methods must follow these rules:
2409	 *
2410	 * - Return 0 on success, -errno on failure.
2411 * - If classification is supported, fill classes[] with
2412 * recognized class codes.
2413 * - If classification is not supported, leave classes[] alone.
2414 * - If verbose is non-zero, print error message on failure;
2415 * otherwise, shut up.
2416 *
2417 * LOCKING:
2418 * Kernel thread context (may sleep)
2419 *
2420 * RETURNS:
2421	 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2422 * if classification fails, and any error code from reset
2423 * methods.
2424 */
2425 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2426 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2427 ata_postreset_fn_t postreset, unsigned int *classes)
2428 {
2429 int rc = -EINVAL;
2430
2431 if (probeinit)
2432 probeinit(ap);
2433
2434 if (softreset) {
2435 rc = do_probe_reset(ap, softreset, postreset, classes);
2436 if (rc == 0)
2437 return 0;
2438 }
2439
2440 if (!hardreset)
2441 return rc;
2442
2443 rc = do_probe_reset(ap, hardreset, postreset, classes);
2444 if (rc == 0 || rc != -ENODEV)
2445 return rc;
2446
2447 if (softreset)
2448 rc = do_probe_reset(ap, softreset, postreset, classes);
2449
2450 return rc;
2451 }
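
/*
 * Illustrative sketch only: a SATA low-level driver that supports
 * both reset flavors would typically build its ->probe_reset on top
 * of this helper.  foo_probe_reset() below is a hypothetical LLD
 * callback, not a function defined in this file:
 *
 *	static int foo_probe_reset(struct ata_port *ap,
 *				   unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset,
 *					     sata_std_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 *
 * which is exactly how ata_std_probe_reset() above is implemented.
 */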
2452
2453 /**
2454 * ata_dev_same_device - Determine whether new ID matches configured device
2455 * @ap: port on which the device to compare against resides
2456 * @dev: device to compare against
2457 * @new_class: class of the new device
2458 * @new_id: IDENTIFY page of the new device
2459 *
2460 * Compare @new_class and @new_id against @dev and determine
2461 * whether @dev is the device indicated by @new_class and
2462 * @new_id.
2463 *
2464 * LOCKING:
2465 * None.
2466 *
2467 * RETURNS:
2468 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2469 */
2470 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2471 unsigned int new_class, const u16 *new_id)
2472 {
2473 const u16 *old_id = dev->id;
2474 unsigned char model[2][41], serial[2][21];
2475 u64 new_n_sectors;
2476
2477 if (dev->class != new_class) {
2478 printk(KERN_INFO
2479 "ata%u: dev %u class mismatch %d != %d\n",
2480 ap->id, dev->devno, dev->class, new_class);
2481 return 0;
2482 }
2483
2484 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2485 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2486 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2487 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2488 new_n_sectors = ata_id_n_sectors(new_id);
2489
2490 if (strcmp(model[0], model[1])) {
2491 printk(KERN_INFO
2492 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2493 ap->id, dev->devno, model[0], model[1]);
2494 return 0;
2495 }
2496
2497 if (strcmp(serial[0], serial[1])) {
2498 printk(KERN_INFO
2499 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2500 ap->id, dev->devno, serial[0], serial[1]);
2501 return 0;
2502 }
2503
2504 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2505 printk(KERN_INFO
2506 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2507 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2508 (unsigned long long)new_n_sectors);
2509 return 0;
2510 }
2511
2512 return 1;
2513 }
2514
2515 /**
2516 * ata_dev_revalidate - Revalidate ATA device
2517 * @ap: port on which the device to revalidate resides
2518 * @dev: device to revalidate
2519 * @post_reset: is this revalidation after reset?
2520 *
2521 * Re-read IDENTIFY page and make sure @dev is still attached to
2522 * the port.
2523 *
2524 * LOCKING:
2525 * Kernel thread context (may sleep)
2526 *
2527 * RETURNS:
2528 * 0 on success, negative errno otherwise
2529 */
2530 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2531 int post_reset)
2532 {
2533 unsigned int class;
2534 u16 *id;
2535 int rc;
2536
2537 if (!ata_dev_present(dev))
2538 return -ENODEV;
2539
2540 class = dev->class;
2541 id = NULL;
2542
2543 /* allocate & read ID data */
2544 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2545 if (rc)
2546 goto fail;
2547
2548 /* is the device still there? */
2549 if (!ata_dev_same_device(ap, dev, class, id)) {
2550 rc = -ENODEV;
2551 goto fail;
2552 }
2553
2554 kfree(dev->id);
2555 dev->id = id;
2556
2557 /* configure device according to the new ID */
2558 return ata_dev_configure(ap, dev, 0);
2559
2560 fail:
2561 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2562 ap->id, dev->devno, rc);
2563 kfree(id);
2564 return rc;
2565 }
2566
2567 static const char * const ata_dma_blacklist [] = {
2568 "WDC AC11000H", NULL,
2569 "WDC AC22100H", NULL,
2570 "WDC AC32500H", NULL,
2571 "WDC AC33100H", NULL,
2572 "WDC AC31600H", NULL,
2573 "WDC AC32100H", "24.09P07",
2574 "WDC AC23200L", "21.10N21",
2575 "Compaq CRD-8241B", NULL,
2576 "CRD-8400B", NULL,
2577 "CRD-8480B", NULL,
2578 "CRD-8482B", NULL,
2579 "CRD-84", NULL,
2580 "SanDisk SDP3B", NULL,
2581 "SanDisk SDP3B-64", NULL,
2582 "SANYO CD-ROM CRD", NULL,
2583 "HITACHI CDR-8", NULL,
2584 "HITACHI CDR-8335", NULL,
2585 "HITACHI CDR-8435", NULL,
2586 "Toshiba CD-ROM XM-6202B", NULL,
2587 "TOSHIBA CD-ROM XM-1702BC", NULL,
2588 "CD-532E-A", NULL,
2589 "E-IDE CD-ROM CR-840", NULL,
2590 "CD-ROM Drive/F5A", NULL,
2591 "WPI CDD-820", NULL,
2592 "SAMSUNG CD-ROM SC-148C", NULL,
2593 "SAMSUNG CD-ROM SC", NULL,
2594 "SanDisk SDP3B-64", NULL,
2595 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2596 "_NEC DV5800A", NULL,
2597 "SAMSUNG CD-ROM SN-124", "N001"
2598 };
2599
2600 static int ata_strim(char *s, size_t len)
2601 {
2602 len = strnlen(s, len);
2603
2604 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2605 while ((len > 0) && (s[len - 1] == ' ')) {
2606 len--;
2607 s[len] = 0;
2608 }
2609 return len;
2610 }
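
/*
 * Example: IDENTIFY strings are blank-padded per the spec, so
 * ata_strim() turns "WDC AC11000H    " into "WDC AC11000H" and
 * returns the trimmed length, which ata_dma_blacklisted() below
 * uses as its strncmp() bound.
 */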
2611
2612 static int ata_dma_blacklisted(const struct ata_device *dev)
2613 {
2614 unsigned char model_num[40];
2615 unsigned char model_rev[16];
2616 unsigned int nlen, rlen;
2617 int i;
2618
2619 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2620 sizeof(model_num));
2621 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2622 sizeof(model_rev));
2623 nlen = ata_strim(model_num, sizeof(model_num));
2624 rlen = ata_strim(model_rev, sizeof(model_rev));
2625
2626 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2627 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2628 if (ata_dma_blacklist[i+1] == NULL)
2629 return 1;
2630	if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2631 return 1;
2632 }
2633 }
2634 return 0;
2635 }
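
/*
 * The table above is laid out as { model, revision } pairs, hence the
 * i += 2 stride: a NULL revision blacklists every firmware revision
 * of that model (e.g. "WDC AC11000H"), while a non-NULL one such as
 * "24.09P07" blacklists only that revision of "WDC AC32100H".
 */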
2636
2637 /**
2638 * ata_dev_xfermask - Compute supported xfermask of the given device
2639 * @ap: Port on which the device to compute xfermask for resides
2640 * @dev: Device to compute xfermask for
2641 *
2642 * Compute supported xfermask of @dev. This function is
2643 * responsible for applying all known limits including host
2644 * controller limits, device blacklist, etc...
2645 *
2646 * LOCKING:
2647 * None.
2648 *
2649 * RETURNS:
2650 * Computed xfermask.
2651 */
2652 static unsigned int ata_dev_xfermask(struct ata_port *ap,
2653 struct ata_device *dev)
2654 {
2655 unsigned long xfer_mask;
2656 int i;
2657
2658 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2659 ap->udma_mask);
2660
2661 /* use port-wide xfermask for now */
2662 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2663 struct ata_device *d = &ap->device[i];
2664 if (!ata_dev_present(d))
2665 continue;
2666 xfer_mask &= ata_id_xfermask(d->id);
2667 if (ata_dma_blacklisted(d))
2668 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2669 }
2670
2671 if (ata_dma_blacklisted(dev))
2672 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2673 "disabling DMA\n", ap->id, dev->devno);
2674
2675 return xfer_mask;
2676 }
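
/*
 * Note that because the mask above is still computed port-wide, a
 * UDMA-capable disk sharing the channel with a blacklisted ATAPI
 * drive loses its MWDMA/UDMA bits as well; both devices run PIO
 * until per-device xfermasks are implemented.
 */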
2677
2678 /**
2679 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2680 * @ap: Port associated with device @dev
2681 * @dev: Device to which command will be sent
2682 *
2683 * Issue SET FEATURES - XFER MODE command to device @dev
2684 * on port @ap.
2685 *
2686 * LOCKING:
2687 * PCI/etc. bus probe sem.
2688 */
2689
2690 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2691 {
2692 struct ata_taskfile tf;
2693
2694 /* set up set-features taskfile */
2695 DPRINTK("set features - xfer mode\n");
2696
2697 ata_tf_init(ap, &tf, dev->devno);
2698 tf.command = ATA_CMD_SET_FEATURES;
2699 tf.feature = SETFEATURES_XFER;
2700 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2701 tf.protocol = ATA_PROT_NODATA;
2702 tf.nsect = dev->xfer_mode;
2703
2704 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2705 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2706 ap->id);
2707 ata_port_disable(ap);
2708 }
2709
2710 DPRINTK("EXIT\n");
2711 }
2712
2713 /**
2714 * ata_dev_init_params - Issue INIT DEV PARAMS command
2715 * @ap: Port associated with device @dev
2716 * @dev: Device to which command will be sent
2717 *
2718 * LOCKING:
2719 * Kernel thread context (may sleep)
2720 *
2721 * RETURNS:
2722 * 0 on success, AC_ERR_* mask otherwise.
2723 */
2724
2725 static unsigned int ata_dev_init_params(struct ata_port *ap,
2726 struct ata_device *dev)
2727 {
2728 struct ata_taskfile tf;
2729 unsigned int err_mask;
2730 u16 sectors = dev->id[6];
2731 u16 heads = dev->id[3];
2732
2733 /* Number of sectors per track 1-255. Number of heads 1-16 */
2734 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2735 return 0;
2736
2737 /* set up init dev params taskfile */
2738	DPRINTK("init dev params\n");
2739
2740 ata_tf_init(ap, &tf, dev->devno);
2741 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2742 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2743 tf.protocol = ATA_PROT_NODATA;
2744 tf.nsect = sectors;
2745 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2746
2747 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2748
2749 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2750 return err_mask;
2751 }
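
/*
 * Worked example: a legacy drive reporting 16 heads (ID word 3) and
 * 63 sectors per track (ID word 6) is programmed with tf.nsect = 63
 * and tf.device |= 0x0f (max head = 15), i.e. the classic 16/63 CHS
 * translation geometry.
 */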
2752
2753 /**
2754 * ata_sg_clean - Unmap DMA memory associated with command
2755 * @qc: Command containing DMA memory to be released
2756 *
2757 * Unmap all mapped DMA memory associated with this command.
2758 *
2759 * LOCKING:
2760 * spin_lock_irqsave(host_set lock)
2761 */
2762
2763 static void ata_sg_clean(struct ata_queued_cmd *qc)
2764 {
2765 struct ata_port *ap = qc->ap;
2766 struct scatterlist *sg = qc->__sg;
2767 int dir = qc->dma_dir;
2768 void *pad_buf = NULL;
2769
2770 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2771 WARN_ON(sg == NULL);
2772
2773 if (qc->flags & ATA_QCFLAG_SINGLE)
2774 WARN_ON(qc->n_elem > 1);
2775
2776 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2777
2778 /* if we padded the buffer out to 32-bit bound, and data
2779 * xfer direction is from-device, we must copy from the
2780 * pad buffer back into the supplied buffer
2781 */
2782 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2783 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2784
2785 if (qc->flags & ATA_QCFLAG_SG) {
2786 if (qc->n_elem)
2787 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2788 /* restore last sg */
2789 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2790 if (pad_buf) {
2791 struct scatterlist *psg = &qc->pad_sgent;
2792 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2793 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2794 kunmap_atomic(addr, KM_IRQ0);
2795 }
2796 } else {
2797 if (qc->n_elem)
2798 dma_unmap_single(ap->host_set->dev,
2799 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2800 dir);
2801 /* restore sg */
2802 sg->length += qc->pad_len;
2803 if (pad_buf)
2804 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2805 pad_buf, qc->pad_len);
2806 }
2807
2808 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2809 qc->__sg = NULL;
2810 }
2811
2812 /**
2813 * ata_fill_sg - Fill PCI IDE PRD table
2814 * @qc: Metadata associated with taskfile to be transferred
2815 *
2816 * Fill PCI IDE PRD (scatter-gather) table with segments
2817 * associated with the current disk command.
2818 *
2819 * LOCKING:
2820 * spin_lock_irqsave(host_set lock)
2821 *
2822 */
2823 static void ata_fill_sg(struct ata_queued_cmd *qc)
2824 {
2825 struct ata_port *ap = qc->ap;
2826 struct scatterlist *sg;
2827 unsigned int idx;
2828
2829 WARN_ON(qc->__sg == NULL);
2830 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2831
2832 idx = 0;
2833 ata_for_each_sg(sg, qc) {
2834 u32 addr, offset;
2835 u32 sg_len, len;
2836
2837 /* determine if physical DMA addr spans 64K boundary.
2838 * Note h/w doesn't support 64-bit, so we unconditionally
2839 * truncate dma_addr_t to u32.
2840 */
2841 addr = (u32) sg_dma_address(sg);
2842 sg_len = sg_dma_len(sg);
2843
2844 while (sg_len) {
2845 offset = addr & 0xffff;
2846 len = sg_len;
2847 if ((offset + sg_len) > 0x10000)
2848 len = 0x10000 - offset;
2849
2850 ap->prd[idx].addr = cpu_to_le32(addr);
2851 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2852 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2853
2854 idx++;
2855 sg_len -= len;
2856 addr += len;
2857 }
2858 }
2859
2860 if (idx)
2861 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2862 }
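
/*
 * Worked example of the 64K-boundary split above: a 10 KiB segment
 * at bus address 0x1f800 sits 0xf800 into its 64K region, so the
 * first PRD entry covers 0x800 bytes at (0x1f800, 0x800) and a
 * second entry covers the remaining 0x2000 bytes at (0x20000,
 * 0x2000); only the final entry gets ATA_PRD_EOT.
 */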
2863 /**
2864 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2865 * @qc: Metadata associated with taskfile to check
2866 *
2867 * Allow low-level driver to filter ATA PACKET commands, returning
2868 * a status indicating whether or not it is OK to use DMA for the
2869 * supplied PACKET command.
2870 *
2871 * LOCKING:
2872 * spin_lock_irqsave(host_set lock)
2873 *
2874 * RETURNS: 0 when ATAPI DMA can be used
2875 * nonzero otherwise
2876 */
2877 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2878 {
2879 struct ata_port *ap = qc->ap;
2880 int rc = 0; /* Assume ATAPI DMA is OK by default */
2881
2882 if (ap->ops->check_atapi_dma)
2883 rc = ap->ops->check_atapi_dma(qc);
2884
2885 return rc;
2886 }
2887 /**
2888 * ata_qc_prep - Prepare taskfile for submission
2889 * @qc: Metadata associated with taskfile to be prepared
2890 *
2891 * Prepare ATA taskfile for submission.
2892 *
2893 * LOCKING:
2894 * spin_lock_irqsave(host_set lock)
2895 */
2896 void ata_qc_prep(struct ata_queued_cmd *qc)
2897 {
2898 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2899 return;
2900
2901 ata_fill_sg(qc);
2902 }
2903
2904 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2905
2906 /**
2907 * ata_sg_init_one - Associate command with memory buffer
2908 * @qc: Command to be associated
2909 * @buf: Memory buffer
2910 * @buflen: Length of memory buffer, in bytes.
2911 *
2912 * Initialize the data-related elements of queued_cmd @qc
2913 * to point to a single memory buffer, @buf of byte length @buflen.
2914 *
2915 * LOCKING:
2916 * spin_lock_irqsave(host_set lock)
2917 */
2918
2919 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2920 {
2921 struct scatterlist *sg;
2922
2923 qc->flags |= ATA_QCFLAG_SINGLE;
2924
2925 memset(&qc->sgent, 0, sizeof(qc->sgent));
2926 qc->__sg = &qc->sgent;
2927 qc->n_elem = 1;
2928 qc->orig_n_elem = 1;
2929 qc->buf_virt = buf;
2930
2931 sg = qc->__sg;
2932 sg_init_one(sg, buf, buflen);
2933 }
2934
2935 /**
2936 * ata_sg_init - Associate command with scatter-gather table.
2937 * @qc: Command to be associated
2938 * @sg: Scatter-gather table.
2939 * @n_elem: Number of elements in s/g table.
2940 *
2941 * Initialize the data-related elements of queued_cmd @qc
2942 * to point to a scatter-gather table @sg, containing @n_elem
2943 * elements.
2944 *
2945 * LOCKING:
2946 * spin_lock_irqsave(host_set lock)
2947 */
2948
2949 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2950 unsigned int n_elem)
2951 {
2952 qc->flags |= ATA_QCFLAG_SG;
2953 qc->__sg = sg;
2954 qc->n_elem = n_elem;
2955 qc->orig_n_elem = n_elem;
2956 }
2957
2958 /**
2959 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2960 * @qc: Command with memory buffer to be mapped.
2961 *
2962 * DMA-map the memory buffer associated with queued_cmd @qc.
2963 *
2964 * LOCKING:
2965 * spin_lock_irqsave(host_set lock)
2966 *
2967 * RETURNS:
2968 * Zero on success, negative on error.
2969 */
2970
2971 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2972 {
2973 struct ata_port *ap = qc->ap;
2974 int dir = qc->dma_dir;
2975 struct scatterlist *sg = qc->__sg;
2976 dma_addr_t dma_address;
2977 int trim_sg = 0;
2978
2979 /* we must lengthen transfers to end on a 32-bit boundary */
2980 qc->pad_len = sg->length & 3;
2981 if (qc->pad_len) {
2982 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2983 struct scatterlist *psg = &qc->pad_sgent;
2984
2985 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2986
2987 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2988
2989 if (qc->tf.flags & ATA_TFLAG_WRITE)
2990 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2991 qc->pad_len);
2992
2993 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2994 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2995 /* trim sg */
2996 sg->length -= qc->pad_len;
2997 if (sg->length == 0)
2998 trim_sg = 1;
2999
3000 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3001 sg->length, qc->pad_len);
3002 }
3003
3004 if (trim_sg) {
3005 qc->n_elem--;
3006 goto skip_map;
3007 }
3008
3009 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3010 sg->length, dir);
3011 if (dma_mapping_error(dma_address)) {
3012 /* restore sg */
3013 sg->length += qc->pad_len;
3014 return -1;
3015 }
3016
3017 sg_dma_address(sg) = dma_address;
3018 sg_dma_len(sg) = sg->length;
3019
3020 skip_map:
3021 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3022 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3023
3024 return 0;
3025 }
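
/*
 * Example: a 510-byte ATAPI transfer has pad_len = 510 & 3 = 2.  The
 * main sg entry is trimmed to 508 bytes and the 4-byte pad sgent
 * appended, so the controller moves 512 bytes total -- 510 rounded
 * up to a 32-bit boundary -- with the last two real bytes staged in
 * (for writes) or restored from (for reads, see ata_sg_clean) the
 * pad buffer.
 */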
3026
3027 /**
3028 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3029 * @qc: Command with scatter-gather table to be mapped.
3030 *
3031 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3032 *
3033 * LOCKING:
3034 * spin_lock_irqsave(host_set lock)
3035 *
3036 * RETURNS:
3037 * Zero on success, negative on error.
3038 *
3039 */
3040
3041 static int ata_sg_setup(struct ata_queued_cmd *qc)
3042 {
3043 struct ata_port *ap = qc->ap;
3044 struct scatterlist *sg = qc->__sg;
3045 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3046 int n_elem, pre_n_elem, dir, trim_sg = 0;
3047
3048 VPRINTK("ENTER, ata%u\n", ap->id);
3049 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3050
3051 /* we must lengthen transfers to end on a 32-bit boundary */
3052 qc->pad_len = lsg->length & 3;
3053 if (qc->pad_len) {
3054 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3055 struct scatterlist *psg = &qc->pad_sgent;
3056 unsigned int offset;
3057
3058 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3059
3060 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3061
3062 /*
3063 * psg->page/offset are used to copy to-be-written
3064 * data in this function or read data in ata_sg_clean.
3065 */
3066 offset = lsg->offset + lsg->length - qc->pad_len;
3067 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3068 psg->offset = offset_in_page(offset);
3069
3070 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3071 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3072 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3073 kunmap_atomic(addr, KM_IRQ0);
3074 }
3075
3076 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3077 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3078 /* trim last sg */
3079 lsg->length -= qc->pad_len;
3080 if (lsg->length == 0)
3081 trim_sg = 1;
3082
3083 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3084 qc->n_elem - 1, lsg->length, qc->pad_len);
3085 }
3086
3087 pre_n_elem = qc->n_elem;
3088 if (trim_sg && pre_n_elem)
3089 pre_n_elem--;
3090
3091 if (!pre_n_elem) {
3092 n_elem = 0;
3093 goto skip_map;
3094 }
3095
3096 dir = qc->dma_dir;
3097 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3098 if (n_elem < 1) {
3099 /* restore last sg */
3100 lsg->length += qc->pad_len;
3101 return -1;
3102 }
3103
3104 DPRINTK("%d sg elements mapped\n", n_elem);
3105
3106 skip_map:
3107 qc->n_elem = n_elem;
3108
3109 return 0;
3110 }
3111
3112 /**
3113 * ata_poll_qc_complete - turn irq back on and finish qc
3114 * @qc: Command to complete
3116 *
3117 * LOCKING:
3118 * None. (grabs host lock)
3119 */
3120
3121 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3122 {
3123 struct ata_port *ap = qc->ap;
3124 unsigned long flags;
3125
3126 spin_lock_irqsave(&ap->host_set->lock, flags);
3127 ata_irq_on(ap);
3128 ata_qc_complete(qc);
3129 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3130 }
3131
3132 /**
3133 * ata_pio_poll - poll using PIO, depending on current state
3134 * @ap: the target ata_port
3135 *
3136 * LOCKING:
3137 * None. (executing in kernel thread context)
3138 *
3139 * RETURNS:
3140 * timeout value to use
3141 */
3142
3143 static unsigned long ata_pio_poll(struct ata_port *ap)
3144 {
3145 struct ata_queued_cmd *qc;
3146 u8 status;
3147 unsigned int poll_state = HSM_ST_UNKNOWN;
3148 unsigned int reg_state = HSM_ST_UNKNOWN;
3149
3150 qc = ata_qc_from_tag(ap, ap->active_tag);
3151 WARN_ON(qc == NULL);
3152
3153 switch (ap->hsm_task_state) {
3154 case HSM_ST:
3155 case HSM_ST_POLL:
3156 poll_state = HSM_ST_POLL;
3157 reg_state = HSM_ST;
3158 break;
3159 case HSM_ST_LAST:
3160 case HSM_ST_LAST_POLL:
3161 poll_state = HSM_ST_LAST_POLL;
3162 reg_state = HSM_ST_LAST;
3163 break;
3164 default:
3165 BUG();
3166 break;
3167 }
3168
3169 status = ata_chk_status(ap);
3170 if (status & ATA_BUSY) {
3171 if (time_after(jiffies, ap->pio_task_timeout)) {
3172 qc->err_mask |= AC_ERR_TIMEOUT;
3173 ap->hsm_task_state = HSM_ST_TMOUT;
3174 return 0;
3175 }
3176 ap->hsm_task_state = poll_state;
3177 return ATA_SHORT_PAUSE;
3178 }
3179
3180 ap->hsm_task_state = reg_state;
3181 return 0;
3182 }
3183
3184 /**
3185 * ata_pio_complete - check if drive is busy or idle
3186 * @ap: the target ata_port
3187 *
3188 * LOCKING:
3189 * None. (executing in kernel thread context)
3190 *
3191 * RETURNS:
3192 * Zero if qc completed.
3193	 * Non-zero if more processing is needed.
3194 */
3195
3196 static int ata_pio_complete (struct ata_port *ap)
3197 {
3198 struct ata_queued_cmd *qc;
3199 u8 drv_stat;
3200
3201 /*
3202 * This is purely heuristic. This is a fast path. Sometimes when
3203 * we enter, BSY will be cleared in a chk-status or two. If not,
3204 * the drive is probably seeking or something. Snooze for a couple
3205 * msecs, then chk-status again. If still busy, fall back to
3206 * HSM_ST_LAST_POLL state.
3207 */
3208 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3209 if (drv_stat & ATA_BUSY) {
3210 msleep(2);
3211 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3212 if (drv_stat & ATA_BUSY) {
3213 ap->hsm_task_state = HSM_ST_LAST_POLL;
3214 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3215 return 1;
3216 }
3217 }
3218
3219 qc = ata_qc_from_tag(ap, ap->active_tag);
3220 WARN_ON(qc == NULL);
3221
3222 drv_stat = ata_wait_idle(ap);
3223 if (!ata_ok(drv_stat)) {
3224 qc->err_mask |= __ac_err_mask(drv_stat);
3225 ap->hsm_task_state = HSM_ST_ERR;
3226 return 1;
3227 }
3228
3229 ap->hsm_task_state = HSM_ST_IDLE;
3230
3231 WARN_ON(qc->err_mask);
3232 ata_poll_qc_complete(qc);
3233
3234 /* another command may start at this point */
3235
3236 return 0;
3237 }
3238
3239
3240 /**
3241 * swap_buf_le16 - swap halves of 16-bit words in place
3242 * @buf: Buffer to swap
3243 * @buf_words: Number of 16-bit words in buffer.
3244 *
3245 * Swap halves of 16-bit words if needed to convert from
3246 * little-endian byte order to native cpu byte order, or
3247 * vice-versa.
3248 *
3249 * LOCKING:
3250 * Inherited from caller.
3251 */
3252 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3253 {
3254 #ifdef __BIG_ENDIAN
3255 unsigned int i;
3256
3257 for (i = 0; i < buf_words; i++)
3258 buf[i] = le16_to_cpu(buf[i]);
3259 #endif /* __BIG_ENDIAN */
3260 }
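
/*
 * Example: IDENTIFY data is defined as little-endian 16-bit words.
 * A word stored as bytes 0x34 0x12 means 0x1234; a big-endian CPU
 * would read it as 0x3412, hence the le16_to_cpu() pass above.  On
 * little-endian builds this function compiles away entirely.
 */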
3261
3262 /**
3263 * ata_mmio_data_xfer - Transfer data by MMIO
3264 * @ap: port to read/write
3265 * @buf: data buffer
3266 * @buflen: buffer length
3267 * @write_data: read/write
3268 *
3269 * Transfer data from/to the device data register by MMIO.
3270 *
3271 * LOCKING:
3272 * Inherited from caller.
3273 */
3274
3275 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3276 unsigned int buflen, int write_data)
3277 {
3278 unsigned int i;
3279 unsigned int words = buflen >> 1;
3280 u16 *buf16 = (u16 *) buf;
3281 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3282
3283 /* Transfer multiple of 2 bytes */
3284 if (write_data) {
3285 for (i = 0; i < words; i++)
3286 writew(le16_to_cpu(buf16[i]), mmio);
3287 } else {
3288 for (i = 0; i < words; i++)
3289 buf16[i] = cpu_to_le16(readw(mmio));
3290 }
3291
3292 /* Transfer trailing 1 byte, if any. */
3293 if (unlikely(buflen & 0x01)) {
3294 u16 align_buf[1] = { 0 };
3295 unsigned char *trailing_buf = buf + buflen - 1;
3296
3297 if (write_data) {
3298 memcpy(align_buf, trailing_buf, 1);
3299 writew(le16_to_cpu(align_buf[0]), mmio);
3300 } else {
3301 align_buf[0] = cpu_to_le16(readw(mmio));
3302 memcpy(trailing_buf, align_buf, 1);
3303 }
3304 }
3305 }
3306
3307 /**
3308 * ata_pio_data_xfer - Transfer data by PIO
3309 * @ap: port to read/write
3310 * @buf: data buffer
3311 * @buflen: buffer length
3312 * @write_data: read/write
3313 *
3314 * Transfer data from/to the device data register by PIO.
3315 *
3316 * LOCKING:
3317 * Inherited from caller.
3318 */
3319
3320 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3321 unsigned int buflen, int write_data)
3322 {
3323 unsigned int words = buflen >> 1;
3324
3325 /* Transfer multiple of 2 bytes */
3326 if (write_data)
3327 outsw(ap->ioaddr.data_addr, buf, words);
3328 else
3329 insw(ap->ioaddr.data_addr, buf, words);
3330
3331 /* Transfer trailing 1 byte, if any. */
3332 if (unlikely(buflen & 0x01)) {
3333 u16 align_buf[1] = { 0 };
3334 unsigned char *trailing_buf = buf + buflen - 1;
3335
3336 if (write_data) {
3337 memcpy(align_buf, trailing_buf, 1);
3338 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3339 } else {
3340 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3341 memcpy(trailing_buf, align_buf, 1);
3342 }
3343 }
3344 }
3345
3346 /**
3347 * ata_data_xfer - Transfer data from/to the data register.
3348 * @ap: port to read/write
3349 * @buf: data buffer
3350 * @buflen: buffer length
3351 * @do_write: read/write
3352 *
3353 * Transfer data from/to the device data register.
3354 *
3355 * LOCKING:
3356 * Inherited from caller.
3357 */
3358
3359 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3360 unsigned int buflen, int do_write)
3361 {
3362	/* Make the crap hardware pay the costs, not the good stuff */
3363 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3364 unsigned long flags;
3365 local_irq_save(flags);
3366 if (ap->flags & ATA_FLAG_MMIO)
3367 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3368 else
3369 ata_pio_data_xfer(ap, buf, buflen, do_write);
3370 local_irq_restore(flags);
3371 } else {
3372 if (ap->flags & ATA_FLAG_MMIO)
3373 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3374 else
3375 ata_pio_data_xfer(ap, buf, buflen, do_write);
3376 }
3377 }
3378
3379 /**
3380 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3381 * @qc: Command on going
3382 *
3383 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3384 *
3385 * LOCKING:
3386 * Inherited from caller.
3387 */
3388
3389 static void ata_pio_sector(struct ata_queued_cmd *qc)
3390 {
3391 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3392 struct scatterlist *sg = qc->__sg;
3393 struct ata_port *ap = qc->ap;
3394 struct page *page;
3395 unsigned int offset;
3396 unsigned char *buf;
3397
3398 if (qc->cursect == (qc->nsect - 1))
3399 ap->hsm_task_state = HSM_ST_LAST;
3400
3401 page = sg[qc->cursg].page;
3402 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3403
3404 /* get the current page and offset */
3405 page = nth_page(page, (offset >> PAGE_SHIFT));
3406 offset %= PAGE_SIZE;
3407
3408 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3409
3410 if (PageHighMem(page)) {
3411 unsigned long flags;
3412
3413 local_irq_save(flags);
3414 buf = kmap_atomic(page, KM_IRQ0);
3415
3416 /* do the actual data transfer */
3417 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3418
3419 kunmap_atomic(buf, KM_IRQ0);
3420 local_irq_restore(flags);
3421 } else {
3422 buf = page_address(page);
3423 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3424 }
3425
3426 qc->cursect++;
3427 qc->cursg_ofs++;
3428
3429 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3430 qc->cursg++;
3431 qc->cursg_ofs = 0;
3432 }
3433 }
3434
3435 /**
3436 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3437 * @qc: Command on going
3438 *
3439 * Transfer one or many ATA_SECT_SIZE of data from/to the
3440 * ATA device for the DRQ request.
3441 *
3442 * LOCKING:
3443 * Inherited from caller.
3444 */
3445
3446 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3447 {
3448 if (is_multi_taskfile(&qc->tf)) {
3449 /* READ/WRITE MULTIPLE */
3450 unsigned int nsect;
3451
3452 WARN_ON(qc->dev->multi_count == 0);
3453
3454 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3455 while (nsect--)
3456 ata_pio_sector(qc);
3457 } else
3458 ata_pio_sector(qc);
3459 }
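
/*
 * Example: for READ/WRITE MULTIPLE with dev->multi_count == 16, a
 * single DRQ assertion moves up to 16 sectors (8 KiB) through
 * ata_pio_sector(); the min() above caps the final burst at however
 * many sectors remain.
 */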
3460
3461 /**
3462 * atapi_send_cdb - Write CDB bytes to hardware
3463 * @ap: Port to which ATAPI device is attached.
3464 * @qc: Taskfile currently active
3465 *
3466 * When device has indicated its readiness to accept
3467 * a CDB, this function is called. Send the CDB.
3468 *
3469 * LOCKING:
3470 * caller.
3471 */
3472
3473 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3474 {
3475 /* send SCSI cdb */
3476 DPRINTK("send cdb\n");
3477 WARN_ON(qc->dev->cdb_len < 12);
3478
3479 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3480 ata_altstatus(ap); /* flush */
3481
3482 switch (qc->tf.protocol) {
3483 case ATA_PROT_ATAPI:
3484 ap->hsm_task_state = HSM_ST;
3485 break;
3486 case ATA_PROT_ATAPI_NODATA:
3487 ap->hsm_task_state = HSM_ST_LAST;
3488 break;
3489 case ATA_PROT_ATAPI_DMA:
3490 ap->hsm_task_state = HSM_ST_LAST;
3491 /* initiate bmdma */
3492 ap->ops->bmdma_start(qc);
3493 break;
3494 }
3495 }
3496
3497 /**
3498 * ata_pio_first_block - Write first data block to hardware
3499 * @ap: Port to which ATA/ATAPI device is attached.
3500 *
3501 * When device has indicated its readiness to accept
3502 * the data, this function sends out the CDB or
3503 * the first data block by PIO.
3504 * After this,
3505 * - If polling, ata_pio_task() handles the rest.
3506 * - Otherwise, interrupt handler takes over.
3507 *
3508 * LOCKING:
3509 * Kernel thread context (may sleep)
3510 *
3511 * RETURNS:
3512	 * Zero if the irq handler takes over.
3513	 * Non-zero if more processing is needed (polling).
3514 */
3515
3516 static int ata_pio_first_block(struct ata_port *ap)
3517 {
3518 struct ata_queued_cmd *qc;
3519 u8 status;
3520 unsigned long flags;
3521 int has_next;
3522
3523 qc = ata_qc_from_tag(ap, ap->active_tag);
3524 WARN_ON(qc == NULL);
3525 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3526
3527 /* if polling, we will stay in the work queue after sending the data.
3528 * otherwise, interrupt handler takes over after sending the data.
3529 */
3530 has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3531
3532 /* sleep-wait for BSY to clear */
3533 DPRINTK("busy wait\n");
3534 if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
3535 qc->err_mask |= AC_ERR_TIMEOUT;
3536 ap->hsm_task_state = HSM_ST_TMOUT;
3537 goto err_out;
3538 }
3539
3540 /* make sure DRQ is set */
3541 status = ata_chk_status(ap);
3542 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3543 /* device status error */
3544 qc->err_mask |= AC_ERR_HSM;
3545 ap->hsm_task_state = HSM_ST_ERR;
3546 goto err_out;
3547 }
3548
3549 /* Send the CDB (atapi) or the first data block (ata pio out).
3550 * During the state transition, interrupt handler shouldn't
3551 * be invoked before the data transfer is complete and
3552 * hsm_task_state is changed. Hence, the following locking.
3553 */
3554 spin_lock_irqsave(&ap->host_set->lock, flags);
3555
3556 if (qc->tf.protocol == ATA_PROT_PIO) {
3557 /* PIO data out protocol.
3558 * send first data block.
3559 */
3560
3561 /* ata_pio_sectors() might change the state to HSM_ST_LAST.
3562 * so, the state is changed here before ata_pio_sectors().
3563 */
3564 ap->hsm_task_state = HSM_ST;
3565 ata_pio_sectors(qc);
3566 ata_altstatus(ap); /* flush */
3567 } else
3568 /* send CDB */
3569 atapi_send_cdb(ap, qc);
3570
3571 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3572
3573 /* if polling, ata_pio_task() handles the rest.
3574 * otherwise, interrupt handler takes over from here.
3575 */
3576 return has_next;
3577
3578 err_out:
3579 return 1; /* has next */
3580 }
3581
3582 /**
3583 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3584 * @qc: Command on going
3585 * @bytes: number of bytes
3586 *
3587	 * Transfer data from/to the ATAPI device.
3588 *
3589 * LOCKING:
3590 * Inherited from caller.
3591 *
3592 */
3593
3594 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3595 {
3596 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3597 struct scatterlist *sg = qc->__sg;
3598 struct ata_port *ap = qc->ap;
3599 struct page *page;
3600 unsigned char *buf;
3601 unsigned int offset, count;
3602
3603 if (qc->curbytes + bytes >= qc->nbytes)
3604 ap->hsm_task_state = HSM_ST_LAST;
3605
3606 next_sg:
3607 if (unlikely(qc->cursg >= qc->n_elem)) {
3608	/*
3609	 * The end of qc->sg has been reached but the device still
3610	 * expects more data.  To avoid overrunning qc->sg while still
3611	 * honoring the length in the byte count register:
3612	 * - for the read case, discard trailing data from the device
3613	 * - for the write case, pad the device with zero data
3614	 */
3615 u16 pad_buf[1] = { 0 };
3616 unsigned int words = bytes >> 1;
3617 unsigned int i;
3618
3619 if (words) /* warning if bytes > 1 */
3620 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3621 ap->id, bytes);
3622
3623 for (i = 0; i < words; i++)
3624 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3625
3626 ap->hsm_task_state = HSM_ST_LAST;
3627 return;
3628 }
3629
3630 sg = &qc->__sg[qc->cursg];
3631
3632 page = sg->page;
3633 offset = sg->offset + qc->cursg_ofs;
3634
3635 /* get the current page and offset */
3636 page = nth_page(page, (offset >> PAGE_SHIFT));
3637 offset %= PAGE_SIZE;
3638
3639 /* don't overrun current sg */
3640 count = min(sg->length - qc->cursg_ofs, bytes);
3641
3642 /* don't cross page boundaries */
3643 count = min(count, (unsigned int)PAGE_SIZE - offset);
3644
3645 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3646
3647 if (PageHighMem(page)) {
3648 unsigned long flags;
3649
3650 local_irq_save(flags);
3651 buf = kmap_atomic(page, KM_IRQ0);
3652
3653 /* do the actual data transfer */
3654 ata_data_xfer(ap, buf + offset, count, do_write);
3655
3656 kunmap_atomic(buf, KM_IRQ0);
3657 local_irq_restore(flags);
3658 } else {
3659 buf = page_address(page);
3660 ata_data_xfer(ap, buf + offset, count, do_write);
3661 }
3662
3663 bytes -= count;
3664 qc->curbytes += count;
3665 qc->cursg_ofs += count;
3666
3667 if (qc->cursg_ofs == sg->length) {
3668 qc->cursg++;
3669 qc->cursg_ofs = 0;
3670 }
3671
3672 if (bytes)
3673 goto next_sg;
3674 }
3675
3676 /**
3677 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3678 * @qc: Command on going
3679 *
3680	 * Transfer data from/to the ATAPI device.
3681 *
3682 * LOCKING:
3683 * Inherited from caller.
3684 */
3685
3686 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3687 {
3688 struct ata_port *ap = qc->ap;
3689 struct ata_device *dev = qc->dev;
3690 unsigned int ireason, bc_lo, bc_hi, bytes;
3691 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3692
3693 ap->ops->tf_read(ap, &qc->tf);
3694 ireason = qc->tf.nsect;
3695 bc_lo = qc->tf.lbam;
3696 bc_hi = qc->tf.lbah;
3697 bytes = (bc_hi << 8) | bc_lo;
3698
3699	/* CoD (bit 0) shall be cleared to zero, indicating data transfer */
3700 if (ireason & (1 << 0))
3701 goto err_out;
3702
3703 /* make sure transfer direction matches expected */
3704 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3705 if (do_write != i_write)
3706 goto err_out;
3707
3708 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3709
3710 __atapi_pio_bytes(qc, bytes);
3711
3712 return;
3713
3714 err_out:
3715 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3716 ap->id, dev->devno);
3717 qc->err_mask |= AC_ERR_HSM;
3718 ap->hsm_task_state = HSM_ST_ERR;
3719 }
3720
3721 /**
3722 * ata_pio_block - start PIO on a block
3723 * @ap: the target ata_port
3724 *
3725 * LOCKING:
3726 * None. (executing in kernel thread context)
3727 */
3728
3729 static void ata_pio_block(struct ata_port *ap)
3730 {
3731 struct ata_queued_cmd *qc;
3732 u8 status;
3733
3734 /*
3735 * This is purely heuristic. This is a fast path.
3736 * Sometimes when we enter, BSY will be cleared in
3737 * a chk-status or two. If not, the drive is probably seeking
3738 * or something. Snooze for a couple msecs, then
3739 * chk-status again. If still busy, fall back to
3740 * HSM_ST_POLL state.
3741 */
3742 status = ata_busy_wait(ap, ATA_BUSY, 5);
3743 if (status & ATA_BUSY) {
3744 msleep(2);
3745 status = ata_busy_wait(ap, ATA_BUSY, 10);
3746 if (status & ATA_BUSY) {
3747 ap->hsm_task_state = HSM_ST_POLL;
3748 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3749 return;
3750 }
3751 }
3752
3753 qc = ata_qc_from_tag(ap, ap->active_tag);
3754 WARN_ON(qc == NULL);
3755
3756 /* check error */
3757 if (status & (ATA_ERR | ATA_DF)) {
3758 qc->err_mask |= AC_ERR_DEV;
3759 ap->hsm_task_state = HSM_ST_ERR;
3760 return;
3761 }
3762
3763 /* transfer data if any */
3764 if (is_atapi_taskfile(&qc->tf)) {
3765 /* DRQ=0 means no more data to transfer */
3766 if ((status & ATA_DRQ) == 0) {
3767 ap->hsm_task_state = HSM_ST_LAST;
3768 return;
3769 }
3770
3771 atapi_pio_bytes(qc);
3772 } else {
3773 /* handle BSY=0, DRQ=0 as error */
3774 if ((status & ATA_DRQ) == 0) {
3775 qc->err_mask |= AC_ERR_HSM;
3776 ap->hsm_task_state = HSM_ST_ERR;
3777 return;
3778 }
3779
3780 ata_pio_sectors(qc);
3781 }
3782
3783 ata_altstatus(ap); /* flush */
3784 }
3785
3786 static void ata_pio_error(struct ata_port *ap)
3787 {
3788 struct ata_queued_cmd *qc;
3789
3790 qc = ata_qc_from_tag(ap, ap->active_tag);
3791 WARN_ON(qc == NULL);
3792
3793 if (qc->tf.command != ATA_CMD_PACKET)
3794 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3795
3796 /* make sure qc->err_mask is available to
3797 * know what's wrong and recover
3798 */
3799 WARN_ON(qc->err_mask == 0);
3800
3801 ap->hsm_task_state = HSM_ST_IDLE;
3802
3803 ata_poll_qc_complete(qc);
3804 }
3805
3806 static void ata_pio_task(void *_data)
3807 {
3808 struct ata_port *ap = _data;
3809 unsigned long timeout;
3810 int has_next;
3811
3812 fsm_start:
3813 timeout = 0;
3814 has_next = 1;
3815
3816 switch (ap->hsm_task_state) {
3817 case HSM_ST_FIRST:
3818 has_next = ata_pio_first_block(ap);
3819 break;
3820
3821 case HSM_ST:
3822 ata_pio_block(ap);
3823 break;
3824
3825 case HSM_ST_LAST:
3826 has_next = ata_pio_complete(ap);
3827 break;
3828
3829 case HSM_ST_POLL:
3830 case HSM_ST_LAST_POLL:
3831 timeout = ata_pio_poll(ap);
3832 break;
3833
3834 case HSM_ST_TMOUT:
3835 case HSM_ST_ERR:
3836 ata_pio_error(ap);
3837 return;
3838
3839 default:
3840 BUG();
3841 return;
3842 }
3843
3844 if (timeout)
3845 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3846 else if (has_next)
3847 goto fsm_start;
3848 }
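
/*
 * Summary of the polling HSM driven by the switch above:
 *
 *   HSM_ST_FIRST     -> HSM_ST (PIO data out / ATAPI data) or
 *                       HSM_ST_LAST (ATAPI nodata/DMA)
 *   HSM_ST           -> HSM_ST_LAST once the final block is queued,
 *                       or HSM_ST_POLL while the device stays busy
 *   HSM_ST_LAST      -> HSM_ST_IDLE (qc completed), or
 *                       HSM_ST_LAST_POLL while busy
 *   HSM_ST_*_POLL    -> back to HSM_ST/HSM_ST_LAST when BSY clears,
 *                       rescheduling this task instead of spinning
 *   HSM_ST_TMOUT/ERR -> ata_pio_error(), which completes the qc and
 *                       returns the port to HSM_ST_IDLE
 */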
3849
3850 /**
3851 * atapi_packet_task - Write CDB bytes to hardware
3852 * @_data: Port to which ATAPI device is attached.
3853 *
3854 * When device has indicated its readiness to accept
3855 * a CDB, this function is called. Send the CDB.
3856 * If DMA is to be performed, exit immediately.
3857	 * Otherwise, we are in polling mode, so poll
3858	 * status until the operation succeeds or fails.
3859 *
3860 * LOCKING:
3861 * Kernel thread context (may sleep)
3862 */
3863
3864 static void atapi_packet_task(void *_data)
3865 {
3866 struct ata_port *ap = _data;
3867 struct ata_queued_cmd *qc;
3868 u8 status;
3869
3870 qc = ata_qc_from_tag(ap, ap->active_tag);
3871 WARN_ON(qc == NULL);
3872 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3873
3874 /* sleep-wait for BSY to clear */
3875 DPRINTK("busy wait\n");
3876 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3877 qc->err_mask |= AC_ERR_TIMEOUT;
3878 goto err_out;
3879 }
3880
3881 /* make sure DRQ is set */
3882 status = ata_chk_status(ap);
3883 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3884 qc->err_mask |= AC_ERR_HSM;
3885 goto err_out;
3886 }
3887
3888 /* send SCSI cdb */
3889 DPRINTK("send cdb\n");
3890 WARN_ON(qc->dev->cdb_len < 12);
3891
3892 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3893 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3894 unsigned long flags;
3895
3896 /* Once we're done issuing command and kicking bmdma,
3897 * irq handler takes over. To not lose irq, we need
3898 * to clear NOINTR flag before sending cdb, but
3899 * interrupt handler shouldn't be invoked before we're
3900 * finished. Hence, the following locking.
3901 */
3902 spin_lock_irqsave(&ap->host_set->lock, flags);
3903 #warning FIXME
3904 /* ap->flags &= ~ATA_FLAG_NOINTR; */
3905 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3906 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3907 ap->ops->bmdma_start(qc); /* initiate bmdma */
3908 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3909 } else {
3910 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3911
3912 /* PIO commands are handled by polling */
3913 ap->hsm_task_state = HSM_ST;
3914 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3915 }
3916
3917 return;
3918
3919 err_out:
3920 ata_poll_qc_complete(qc);
3921 }
3922
3923 /**
3924 * ata_qc_timeout - Handle timeout of queued command
3925 * @qc: Command that timed out
3926 *
3927 * Some part of the kernel (currently, only the SCSI layer)
3928 * has noticed that the active command on port @ap has not
3929 * completed after a specified length of time. Handle this
3930 * condition by disabling DMA (if necessary) and completing
3931 * transactions, with error if necessary.
3932 *
3933 * This also handles the case of the "lost interrupt", where
3934 * for some reason (possibly hardware bug, possibly driver bug)
3935 * an interrupt was not delivered to the driver, even though the
3936 * transaction completed successfully.
3937 *
3938 * LOCKING:
3939 * Inherited from SCSI layer (none, can sleep)
3940 */
3941
3942 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3943 {
3944 struct ata_port *ap = qc->ap;
3945 struct ata_host_set *host_set = ap->host_set;
3946 u8 host_stat = 0, drv_stat;
3947 unsigned long flags;
3948
3949 DPRINTK("ENTER\n");
3950
3951 ap->hsm_task_state = HSM_ST_IDLE;
3952
3953 spin_lock_irqsave(&host_set->lock, flags);
3954
3955 switch (qc->tf.protocol) {
3956
3957 case ATA_PROT_DMA:
3958 case ATA_PROT_ATAPI_DMA:
3959 host_stat = ap->ops->bmdma_status(ap);
3960
3961 /* before we do anything else, clear DMA-Start bit */
3962 ap->ops->bmdma_stop(qc);
3963
3964 /* fall through */
3965
3966 default:
3967 ata_altstatus(ap);
3968 drv_stat = ata_chk_status(ap);
3969
3970 /* ack bmdma irq events */
3971 ap->ops->irq_clear(ap);
3972
3973 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3974 ap->id, qc->tf.command, drv_stat, host_stat);
3975
3976 ap->hsm_task_state = HSM_ST_IDLE;
3977
3978 /* complete taskfile transaction */
3979 qc->err_mask |= AC_ERR_TIMEOUT;
3980 break;
3981 }
3982
3983 spin_unlock_irqrestore(&host_set->lock, flags);
3984
3985 ata_eh_qc_complete(qc);
3986
3987 DPRINTK("EXIT\n");
3988 }
3989
3990 /**
3991 * ata_eng_timeout - Handle timeout of queued command
3992 * @ap: Port on which timed-out command is active
3993 *
3994 * Some part of the kernel (currently, only the SCSI layer)
3995 * has noticed that the active command on port @ap has not
3996 * completed after a specified length of time. Handle this
3997 * condition by disabling DMA (if necessary) and completing
3998 * transactions, with error if necessary.
3999 *
4000 * This also handles the case of the "lost interrupt", where
4001 * for some reason (possibly hardware bug, possibly driver bug)
4002 * an interrupt was not delivered to the driver, even though the
4003 * transaction completed successfully.
4004 *
4005 * LOCKING:
4006 * Inherited from SCSI layer (none, can sleep)
4007 */
4008
4009 void ata_eng_timeout(struct ata_port *ap)
4010 {
4011 DPRINTK("ENTER\n");
4012
4013 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
4014
4015 DPRINTK("EXIT\n");
4016 }
4017
4018 /**
4019 * ata_qc_new - Request an available ATA command, for queueing
4020	 * @ap: Port from which a free command structure is requested
4022 *
4023 * LOCKING:
4024 * None.
4025 */
4026
4027 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4028 {
4029 struct ata_queued_cmd *qc = NULL;
4030 unsigned int i;
4031
4032 for (i = 0; i < ATA_MAX_QUEUE; i++)
4033 if (!test_and_set_bit(i, &ap->qactive)) {
4034 qc = ata_qc_from_tag(ap, i);
4035 break;
4036 }
4037
4038 if (qc)
4039 qc->tag = i;
4040
4041 return qc;
4042 }
4043
4044 /**
4045 * ata_qc_new_init - Request an available ATA command, and initialize it
4046 * @ap: Port associated with device @dev
4047	 * @dev: Device for which we request an available command structure
4048 *
4049 * LOCKING:
4050 * None.
4051 */
4052
4053 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
4054 struct ata_device *dev)
4055 {
4056 struct ata_queued_cmd *qc;
4057
4058 qc = ata_qc_new(ap);
4059 if (qc) {
4060 qc->scsicmd = NULL;
4061 qc->ap = ap;
4062 qc->dev = dev;
4063
4064 ata_qc_reinit(qc);
4065 }
4066
4067 return qc;
4068 }
4069
4070 /**
4071 * ata_qc_free - free unused ata_queued_cmd
4072 * @qc: Command to complete
4073 *
4074 * Designed to free unused ata_queued_cmd object
4075 * in case something prevents using it.
4076 *
4077 * LOCKING:
4078 * spin_lock_irqsave(host_set lock)
4079 */
4080 void ata_qc_free(struct ata_queued_cmd *qc)
4081 {
4082 struct ata_port *ap = qc->ap;
4083 unsigned int tag;
4084
4085 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4086
4087 qc->flags = 0;
4088 tag = qc->tag;
4089 if (likely(ata_tag_valid(tag))) {
4090 if (tag == ap->active_tag)
4091 ap->active_tag = ATA_TAG_POISON;
4092 qc->tag = ATA_TAG_POISON;
4093 clear_bit(tag, &ap->qactive);
4094 }
4095 }
4096
4097 void __ata_qc_complete(struct ata_queued_cmd *qc)
4098 {
4099 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4100 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4101
4102 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4103 ata_sg_clean(qc);
4104
4105 /* atapi: mark qc as inactive so the interrupt handler cannot
4106 * complete the command a second time before the error handler
4107 * runs (the case where rc != 0 and ATAPI request sense is needed)
4108 */
4109 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4110
4111 /* call completion callback */
4112 qc->complete_fn(qc);
4113 }
4114
4115 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4116 {
4117 struct ata_port *ap = qc->ap;
4118
4119 switch (qc->tf.protocol) {
4120 case ATA_PROT_DMA:
4121 case ATA_PROT_ATAPI_DMA:
4122 return 1;
4123
4124 case ATA_PROT_ATAPI:
4125 case ATA_PROT_PIO:
4126 if (ap->flags & ATA_FLAG_PIO_DMA)
4127 return 1;
4128
4129 /* fall through */
4130
4131 default:
4132 return 0;
4133 }
4134
4135 /* never reached */
4136 }
4137
4138 /**
4139 * ata_qc_issue - issue taskfile to device
4140 * @qc: command to issue to device
4141 *
4142 * Prepare an ATA command for submission to the device.
4143 * This includes mapping the data into a DMA-able
4144 * area, filling in the S/G table, and finally
4145 * writing the taskfile to hardware, starting the command.
4146 *
4147 * LOCKING:
4148 * spin_lock_irqsave(host_set lock)
4149 *
4150 * RETURNS:
4151 * Zero on success, AC_ERR_* mask on failure
4152 */
4153
4154 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4155 {
4156 struct ata_port *ap = qc->ap;
4157
4158 if (ata_should_dma_map(qc)) {
4159 if (qc->flags & ATA_QCFLAG_SG) {
4160 if (ata_sg_setup(qc))
4161 goto sg_err;
4162 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4163 if (ata_sg_setup_one(qc))
4164 goto sg_err;
4165 }
4166 } else {
4167 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4168 }
4169
4170 ap->ops->qc_prep(qc);
4171
4172 qc->ap->active_tag = qc->tag;
4173 qc->flags |= ATA_QCFLAG_ACTIVE;
4174
4175 return ap->ops->qc_issue(qc);
4176
4177 sg_err:
4178 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4179 return AC_ERR_SYSTEM;
4180 }
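
/*
 * Illustrative sketch, not compiled (#if 0): building and issuing a
 * NODATA command through ata_qc_issue().  Per the locking rules above,
 * the caller holds the host_set lock.  ata_qc_new_init() has already
 * initialized qc->tf via ata_qc_reinit().  my_issue_check_power() and
 * my_done() are hypothetical names.
 */
#if 0
static void my_done(struct ata_queued_cmd *qc)
{
	/* completion hook: runs when the command finishes or fails */
}

static void my_issue_check_power(struct ata_port *ap, struct ata_device *dev)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(ap, dev);
	if (!qc)
		return;

	qc->tf.command = ATA_CMD_CHK_POWER;	/* CHECK POWER MODE */
	qc->tf.flags |= ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->complete_fn = my_done;

	qc->err_mask = ata_qc_issue(qc);	/* 0 or AC_ERR_* mask */
	if (qc->err_mask)
		ata_qc_complete(qc);		/* complete the failed qc */
}
#endif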
4181
4182
4183 /**
4184 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4185 * @qc: command to issue to device
4186 *
4187 * Using various libata functions and hooks, this function
4188 * starts an ATA command. ATA commands are grouped into
4189 * classes called "protocols", and issuing each type of protocol
4190 * is slightly different.
4191 *
4192 * May be used as the qc_issue() entry in ata_port_operations.
4193 *
4194 * LOCKING:
4195 * spin_lock_irqsave(host_set lock)
4196 *
4197 * RETURNS:
4198 * Zero on success, AC_ERR_* mask on failure
4199 */
4200
4201 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4202 {
4203 struct ata_port *ap = qc->ap;
4204
4205 /* Use polling PIO if the LLD doesn't handle
4206 * interrupt-driven PIO and ATAPI CDB interrupts.
4207 */
4208 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4209 switch (qc->tf.protocol) {
4210 case ATA_PROT_PIO:
4211 case ATA_PROT_ATAPI:
4212 case ATA_PROT_ATAPI_NODATA:
4213 qc->tf.flags |= ATA_TFLAG_POLLING;
4214 break;
4215 case ATA_PROT_ATAPI_DMA:
4216 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4217 BUG();
4218 break;
4219 default:
4220 break;
4221 }
4222 }
4223
4224 /* select the device */
4225 ata_dev_select(ap, qc->dev->devno, 1, 0);
4226
4227 /* start the command */
4228 switch (qc->tf.protocol) {
4229 case ATA_PROT_NODATA:
4230 if (qc->tf.flags & ATA_TFLAG_POLLING)
4231 ata_qc_set_polling(qc);
4232
4233 ata_tf_to_host(ap, &qc->tf);
4234 ap->hsm_task_state = HSM_ST_LAST;
4235
4236 if (qc->tf.flags & ATA_TFLAG_POLLING)
4237 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4238
4239 break;
4240
4241 case ATA_PROT_DMA:
4242 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4243
4244 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4245 ap->ops->bmdma_setup(qc); /* set up bmdma */
4246 ap->ops->bmdma_start(qc); /* initiate bmdma */
4247 ap->hsm_task_state = HSM_ST_LAST;
4248 break;
4249
4250 case ATA_PROT_PIO:
4251 if (qc->tf.flags & ATA_TFLAG_POLLING)
4252 ata_qc_set_polling(qc);
4253
4254 ata_tf_to_host(ap, &qc->tf);
4255
4256 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4257 /* PIO data out protocol */
4258 ap->hsm_task_state = HSM_ST_FIRST;
4259 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4260
4261 /* always send the first data block using
4262 * the ata_pio_task() codepath.
4263 */
4264 } else {
4265 /* PIO data in protocol */
4266 ap->hsm_task_state = HSM_ST;
4267
4268 if (qc->tf.flags & ATA_TFLAG_POLLING)
4269 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4270
4271 /* if polling, ata_pio_task() handles the rest;
4272 * otherwise, the interrupt handler takes over from here.
4273 */
4274 }
4275
4276 break;
4277
4278 case ATA_PROT_ATAPI:
4279 case ATA_PROT_ATAPI_NODATA:
4280 if (qc->tf.flags & ATA_TFLAG_POLLING)
4281 ata_qc_set_polling(qc);
4282
4283 ata_tf_to_host(ap, &qc->tf);
4284
4285 ap->hsm_task_state = HSM_ST_FIRST;
4286
4287 /* send cdb by polling if no cdb interrupt */
4288 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4289 (qc->tf.flags & ATA_TFLAG_POLLING))
4290 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4291 break;
4292
4293 case ATA_PROT_ATAPI_DMA:
4294 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4295
4296 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4297 ap->ops->bmdma_setup(qc); /* set up bmdma */
4298 ap->hsm_task_state = HSM_ST_FIRST;
4299
4300 /* send cdb by polling if no cdb interrupt */
4301 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4302 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4303 break;
4304
4305 default:
4306 WARN_ON(1);
4307 return AC_ERR_SYSTEM;
4308 }
4309
4310 return 0;
4311 }
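
/*
 * Illustrative sketch, not compiled (#if 0): as the docbook comment
 * above notes, a LLD can plug ata_qc_issue_prot() straight into its
 * ata_port_operations alongside the other generic helpers exported by
 * this file.  The table name my_port_ops is hypothetical.
 */
#if 0
static const struct ata_port_operations my_port_ops = {
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.eng_timeout	= ata_eng_timeout,
	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
};
#endif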
4312
4313 /**
4314 * ata_host_intr - Handle host interrupt for given (port, task)
4315 * @ap: Port on which interrupt arrived (possibly...)
4316 * @qc: Taskfile currently active in engine
4317 *
4318 * Handle host interrupt for given queued command. Currently,
4319 * only DMA interrupts are handled. All other commands are
4320 * handled via polling with interrupts disabled (nIEN bit).
4321 *
4322 * LOCKING:
4323 * spin_lock_irqsave(host_set lock)
4324 *
4325 * RETURNS:
4326 * One if interrupt was handled, zero if not (shared irq).
4327 */
4328
4329 inline unsigned int ata_host_intr (struct ata_port *ap,
4330 struct ata_queued_cmd *qc)
4331 {
4332 u8 status, host_stat = 0;
4333
4334 VPRINTK("ata%u: protocol %d task_state %d\n",
4335 ap->id, qc->tf.protocol, ap->hsm_task_state);
4336
4337 /* Check whether we are expecting interrupt in this state */
4338 switch (ap->hsm_task_state) {
4339 case HSM_ST_FIRST:
4340 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4341 * The flag is set only for ATAPI devices, so there is
4342 * no need to check is_atapi_taskfile(&qc->tf) again.
4343 */
4344 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4345 goto idle_irq;
4346 break;
4347 case HSM_ST_LAST:
4348 if (qc->tf.protocol == ATA_PROT_DMA ||
4349 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4350 /* check status of DMA engine */
4351 host_stat = ap->ops->bmdma_status(ap);
4352 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4353
4354 /* if it's not our irq... */
4355 if (!(host_stat & ATA_DMA_INTR))
4356 goto idle_irq;
4357
4358 /* before we do anything else, clear DMA-Start bit */
4359 ap->ops->bmdma_stop(qc);
4360
4361 if (unlikely(host_stat & ATA_DMA_ERR)) {
4362 /* error when transferring data to/from memory */
4363 qc->err_mask |= AC_ERR_HOST_BUS;
4364 ap->hsm_task_state = HSM_ST_ERR;
4365 }
4366 }
4367 break;
4368 case HSM_ST:
4369 break;
4370 default:
4371 goto idle_irq;
4372 }
4373
4374 /* check altstatus */
4375 status = ata_altstatus(ap);
4376 if (status & ATA_BUSY)
4377 goto idle_irq;
4378
4379 /* check main status, clearing INTRQ */
4380 status = ata_chk_status(ap);
4381 if (unlikely(status & ATA_BUSY))
4382 goto idle_irq;
4383
4384 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4385 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4386
4387 /* ack bmdma irq events */
4388 ap->ops->irq_clear(ap);
4389
4390 /* check error */
4391 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4392 qc->err_mask |= AC_ERR_DEV;
4393 ap->hsm_task_state = HSM_ST_ERR;
4394 }
4395
4396 fsm_start:
4397 switch (ap->hsm_task_state) {
4398 case HSM_ST_FIRST:
4399 /* Some pre-ATAPI-4 devices assert INTRQ
4400 * in this state when ready to receive the CDB.
4401 */
4402
4403 /* check device status */
4404 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
4405 /* Wrong status. Let EH handle this */
4406 qc->err_mask |= AC_ERR_HSM;
4407 ap->hsm_task_state = HSM_ST_ERR;
4408 goto fsm_start;
4409 }
4410
4411 atapi_send_cdb(ap, qc);
4412
4413 break;
4414
4415 case HSM_ST:
4416 /* complete command or read/write the data register */
4417 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4418 /* ATAPI PIO protocol */
4419 if ((status & ATA_DRQ) == 0) {
4420 /* no more data to transfer */
4421 ap->hsm_task_state = HSM_ST_LAST;
4422 goto fsm_start;
4423 }
4424
4425 atapi_pio_bytes(qc);
4426
4427 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4428 /* bad ireason reported by device */
4429 goto fsm_start;
4430
4431 } else {
4432 /* ATA PIO protocol */
4433 if (unlikely((status & ATA_DRQ) == 0)) {
4434 /* handle BSY=0, DRQ=0 as error */
4435 qc->err_mask |= AC_ERR_HSM;
4436 ap->hsm_task_state = HSM_ST_ERR;
4437 goto fsm_start;
4438 }
4439
4440 ata_pio_sectors(qc);
4441
4442 if (ap->hsm_task_state == HSM_ST_LAST &&
4443 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4444 /* all data read */
4445 ata_altstatus(ap);
4446 status = ata_chk_status(ap);
4447 goto fsm_start;
4448 }
4449 }
4450
4451 ata_altstatus(ap); /* flush */
4452 break;
4453
4454 case HSM_ST_LAST:
4455 if (unlikely(status & ATA_DRQ)) {
4456 /* handle DRQ=1 as error */
4457 qc->err_mask |= AC_ERR_HSM;
4458 ap->hsm_task_state = HSM_ST_ERR;
4459 goto fsm_start;
4460 }
4461
4462 /* no more data to transfer */
4463 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
4464 ap->id, status);
4465
4466 ap->hsm_task_state = HSM_ST_IDLE;
4467
4468 /* complete taskfile transaction */
4469 qc->err_mask |= ac_err_mask(status);
4470 ata_qc_complete(qc);
4471 break;
4472
4473 case HSM_ST_ERR:
4474 if (qc->tf.command != ATA_CMD_PACKET)
4475 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
4476 ap->id, status, host_stat);
4477
4478 /* make sure qc->err_mask is set, so the error
4479 * handler knows what went wrong and can recover
4480 */
4481 WARN_ON(qc->err_mask == 0);
4482
4483 ap->hsm_task_state = HSM_ST_IDLE;
4484 ata_qc_complete(qc);
4485 break;
4486 default:
4487 goto idle_irq;
4488 }
4489
4490 return 1; /* irq handled */
4491
4492 idle_irq:
4493 ap->stats.idle_irq++;
4494
4495 #ifdef ATA_IRQ_TRAP
4496 if ((ap->stats.idle_irq % 1000) == 0) {
4497 ata_irq_ack(ap, 0); /* debug trap */
4498 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4499 return 1;
4500 }
4501 #endif
4502 return 0; /* irq not handled */
4503 }
4504
4505 /**
4506 * ata_interrupt - Default ATA host interrupt handler
4507 * @irq: irq line (unused)
4508 * @dev_instance: pointer to our ata_host_set information structure
4509 * @regs: unused
4510 *
4511 * Default interrupt handler for PCI IDE devices. Calls
4512 * ata_host_intr() for each port that is not disabled.
4513 *
4514 * LOCKING:
4515 * Obtains host_set lock during operation.
4516 *
4517 * RETURNS:
4518 * IRQ_NONE or IRQ_HANDLED.
4519 */
4520
4521 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4522 {
4523 struct ata_host_set *host_set = dev_instance;
4524 unsigned int i;
4525 unsigned int handled = 0;
4526 unsigned long flags;
4527
4528 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4529 spin_lock_irqsave(&host_set->lock, flags);
4530
4531 for (i = 0; i < host_set->n_ports; i++) {
4532 struct ata_port *ap;
4533
4534 ap = host_set->ports[i];
4535 if (ap &&
4536 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
4537 struct ata_queued_cmd *qc;
4538
4539 qc = ata_qc_from_tag(ap, ap->active_tag);
4540 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4541 (qc->flags & ATA_QCFLAG_ACTIVE))
4542 handled |= ata_host_intr(ap, qc);
4543 }
4544 }
4545
4546 spin_unlock_irqrestore(&host_set->lock, flags);
4547
4548 return IRQ_RETVAL(handled);
4549 }
4550
4551
4552 /*
4553 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4554 * without filling in any other registers.
4555 */
4556 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4557 u8 cmd)
4558 {
4559 struct ata_taskfile tf;
4560 int err;
4561
4562 ata_tf_init(ap, &tf, dev->devno);
4563
4564 tf.command = cmd;
4565 tf.flags |= ATA_TFLAG_DEVICE;
4566 tf.protocol = ATA_PROT_NODATA;
4567
4568 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4569 if (err)
4570 printk(KERN_ERR "%s: ata command failed: %d\n",
4571 __FUNCTION__, err);
4572
4573 return err;
4574 }
4575
4576 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4577 {
4578 u8 cmd;
4579
4580 if (!ata_try_flush_cache(dev))
4581 return 0;
4582
4583 if (ata_id_has_flush_ext(dev->id))
4584 cmd = ATA_CMD_FLUSH_EXT;
4585 else
4586 cmd = ATA_CMD_FLUSH;
4587
4588 return ata_do_simple_cmd(ap, dev, cmd);
4589 }
4590
4591 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4592 {
4593 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4594 }
4595
4596 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4597 {
4598 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4599 }
4600
4601 /**
4602 * ata_device_resume - wake up a previously suspended device
4603 * @ap: port the device is connected to
4604 * @dev: the device to resume
4605 *
4606 * Kick the drive back into action by sending it an idle immediate
4607 * command and making sure the transfer mode matches between drive
4608 * and host.
4609 *
4610 */
4611 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4612 {
4613 if (ap->flags & ATA_FLAG_SUSPENDED) {
4614 ap->flags &= ~ATA_FLAG_SUSPENDED;
4615 ata_set_mode(ap);
4616 }
4617 if (!ata_dev_present(dev))
4618 return 0;
4619 if (dev->class == ATA_DEV_ATA)
4620 ata_start_drive(ap, dev);
4621
4622 return 0;
4623 }
4624
4625 /**
4626 * ata_device_suspend - prepare a device for suspend
4627 * @ap: port the device is connected to
4628 * @dev: the device to suspend
4629 *
4630 * Flush the cache on the drive, if appropriate, then issue a
4631 * standbynow command.
4632 */
4633 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4634 {
4635 if (!ata_dev_present(dev))
4636 return 0;
4637 if (dev->class == ATA_DEV_ATA)
4638 ata_flush_cache(ap, dev);
4639
4640 ata_standby_drive(ap, dev);
4641 ap->flags |= ATA_FLAG_SUSPENDED;
4642 return 0;
4643 }
4644
4645 /**
4646 * ata_port_start - Set port up for DMA.
4647 * @ap: Port to initialize
4648 *
4649 * Called just after data structures for each port are
4650 * initialized. Allocates space for PRD table.
4651 *
4652 * May be used as the port_start() entry in ata_port_operations.
4653 *
4654 * LOCKING:
4655 * Inherited from caller.
4656 */
4657
4658 int ata_port_start (struct ata_port *ap)
4659 {
4660 struct device *dev = ap->host_set->dev;
4661 int rc;
4662
4663 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4664 if (!ap->prd)
4665 return -ENOMEM;
4666
4667 rc = ata_pad_alloc(ap, dev);
4668 if (rc) {
4669 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4670 return rc;
4671 }
4672
4673 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4674
4675 return 0;
4676 }
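
/*
 * Illustrative sketch, not compiled (#if 0): a driver that needs
 * per-port private data can wrap ata_port_start()/ata_port_stop().
 * struct my_port_priv and my_port_start() are hypothetical names.
 */
#if 0
struct my_port_priv {
	u32 saved_ctl;			/* controller state to restore */
};

static int my_port_start(struct ata_port *ap)
{
	struct my_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* allocates PRD table and pad buf */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);	/* undo ata_port_start() */
		return -ENOMEM;
	}

	ap->private_data = pp;
	return 0;
}
#endif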
4677
4678
4679 /**
4680 * ata_port_stop - Undo ata_port_start()
4681 * @ap: Port to shut down
4682 *
4683 * Frees the PRD table.
4684 *
4685 * May be used as the port_stop() entry in ata_port_operations.
4686 *
4687 * LOCKING:
4688 * Inherited from caller.
4689 */
4690
4691 void ata_port_stop (struct ata_port *ap)
4692 {
4693 struct device *dev = ap->host_set->dev;
4694
4695 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4696 ata_pad_free(ap, dev);
4697 }
4698
4699 void ata_host_stop (struct ata_host_set *host_set)
4700 {
4701 if (host_set->mmio_base)
4702 iounmap(host_set->mmio_base);
4703 }
4704
4705
4706 /**
4707 * ata_host_remove - Unregister SCSI host structure with upper layers
4708 * @ap: Port to unregister
4709 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4710 *
4711 * LOCKING:
4712 * Inherited from caller.
4713 */
4714
4715 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4716 {
4717 struct Scsi_Host *sh = ap->host;
4718
4719 DPRINTK("ENTER\n");
4720
4721 if (do_unregister)
4722 scsi_remove_host(sh);
4723
4724 ap->ops->port_stop(ap);
4725 }
4726
4727 /**
4728 * ata_host_init - Initialize an ata_port structure
4729 * @ap: Structure to initialize
4730 * @host: associated SCSI mid-layer structure
4731 * @host_set: Collection of hosts to which @ap belongs
4732 * @ent: Probe information provided by low-level driver
4733 * @port_no: Port number associated with this ata_port
4734 *
4735 * Initialize a new ata_port structure, and its associated
4736 * scsi_host.
4737 *
4738 * LOCKING:
4739 * Inherited from caller.
4740 */
4741
4742 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4743 struct ata_host_set *host_set,
4744 const struct ata_probe_ent *ent, unsigned int port_no)
4745 {
4746 unsigned int i;
4747
4748 host->max_id = 16;
4749 host->max_lun = 1;
4750 host->max_channel = 1;
4751 host->unique_id = ata_unique_id++;
4752 host->max_cmd_len = 12;
4753
4754 ap->flags = ATA_FLAG_PORT_DISABLED;
4755 ap->id = host->unique_id;
4756 ap->host = host;
4757 ap->ctl = ATA_DEVCTL_OBS;
4758 ap->host_set = host_set;
4759 ap->port_no = port_no;
4760 ap->hard_port_no =
4761 ent->legacy_mode ? ent->hard_port_no : port_no;
4762 ap->pio_mask = ent->pio_mask;
4763 ap->mwdma_mask = ent->mwdma_mask;
4764 ap->udma_mask = ent->udma_mask;
4765 ap->flags |= ent->host_flags;
4766 ap->ops = ent->port_ops;
4767 ap->cbl = ATA_CBL_NONE;
4768 ap->active_tag = ATA_TAG_POISON;
4769 ap->last_ctl = 0xFF;
4770
4771 INIT_WORK(&ap->port_task, NULL, NULL);
4772 INIT_LIST_HEAD(&ap->eh_done_q);
4773
4774 for (i = 0; i < ATA_MAX_DEVICES; i++)
4775 ap->device[i].devno = i;
4776
4777 #ifdef ATA_IRQ_TRAP
4778 ap->stats.unhandled_irq = 1;
4779 ap->stats.idle_irq = 1;
4780 #endif
4781
4782 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4783 }
4784
4785 /**
4786 * ata_host_add - Attach low-level ATA driver to system
4787 * @ent: Information provided by low-level driver
4788 * @host_set: Collection of ports to which we add
4789 * @port_no: Port number associated with this host
4790 *
4791 * Attach low-level ATA driver to system.
4792 *
4793 * LOCKING:
4794 * PCI/etc. bus probe sem.
4795 *
4796 * RETURNS:
4797 * New ata_port on success, NULL on error.
4798 */
4799
4800 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4801 struct ata_host_set *host_set,
4802 unsigned int port_no)
4803 {
4804 struct Scsi_Host *host;
4805 struct ata_port *ap;
4806 int rc;
4807
4808 DPRINTK("ENTER\n");
4809 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4810 if (!host)
4811 return NULL;
4812
4813 host->transportt = &ata_scsi_transport_template;
4814
4815 ap = (struct ata_port *) &host->hostdata[0];
4816
4817 ata_host_init(ap, host, host_set, ent, port_no);
4818
4819 rc = ap->ops->port_start(ap);
4820 if (rc)
4821 goto err_out;
4822
4823 return ap;
4824
4825 err_out:
4826 scsi_host_put(host);
4827 return NULL;
4828 }
4829
4830 /**
4831 * ata_device_add - Register hardware device with ATA and SCSI layers
4832 * @ent: Probe information describing hardware device to be registered
4833 *
4834 * This function processes the information provided in the probe
4835 * information struct @ent, allocates the necessary ATA and SCSI
4836 * host information structures, initializes them, and registers
4837 * everything with requisite kernel subsystems.
4838 *
4839 * This function requests irqs, probes the ATA bus, and probes
4840 * the SCSI bus.
4841 *
4842 * LOCKING:
4843 * PCI/etc. bus probe sem.
4844 *
4845 * RETURNS:
4846 * Number of ports registered. Zero on error (no ports registered).
4847 */
4848
4849 int ata_device_add(const struct ata_probe_ent *ent)
4850 {
4851 unsigned int count = 0, i;
4852 struct device *dev = ent->dev;
4853 struct ata_host_set *host_set;
4854
4855 DPRINTK("ENTER\n");
4856 /* alloc a container for our list of ATA ports (buses) */
4857 host_set = kzalloc(sizeof(struct ata_host_set) +
4858 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4859 if (!host_set)
4860 return 0;
4861 spin_lock_init(&host_set->lock);
4862
4863 host_set->dev = dev;
4864 host_set->n_ports = ent->n_ports;
4865 host_set->irq = ent->irq;
4866 host_set->mmio_base = ent->mmio_base;
4867 host_set->private_data = ent->private_data;
4868 host_set->ops = ent->port_ops;
4869
4870 /* register each port bound to this device */
4871 for (i = 0; i < ent->n_ports; i++) {
4872 struct ata_port *ap;
4873 unsigned long xfer_mode_mask;
4874
4875 ap = ata_host_add(ent, host_set, i);
4876 if (!ap)
4877 goto err_out;
4878
4879 host_set->ports[i] = ap;
4880 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4881 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4882 (ap->pio_mask << ATA_SHIFT_PIO);
4883
4884 /* print per-port info to dmesg */
4885 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4886 "bmdma 0x%lX irq %lu\n",
4887 ap->id,
4888 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4889 ata_mode_string(xfer_mode_mask),
4890 ap->ioaddr.cmd_addr,
4891 ap->ioaddr.ctl_addr,
4892 ap->ioaddr.bmdma_addr,
4893 ent->irq);
4894
4895 ata_chk_status(ap);
4896 host_set->ops->irq_clear(ap);
4897 count++;
4898 }
4899
4900 if (!count)
4901 goto err_free_ret;
4902
4903 /* obtain the irq, which is shared between channels */
4904 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4905 DRV_NAME, host_set))
4906 goto err_out;
4907
4908 /* perform each probe synchronously */
4909 DPRINTK("probe begin\n");
4910 for (i = 0; i < count; i++) {
4911 struct ata_port *ap;
4912 int rc;
4913
4914 ap = host_set->ports[i];
4915
4916 DPRINTK("ata%u: bus probe begin\n", ap->id);
4917 rc = ata_bus_probe(ap);
4918 DPRINTK("ata%u: bus probe end\n", ap->id);
4919
4920 if (rc) {
4921 /* FIXME: do something useful here?
4922 * Current libata behavior will
4923 * tear down everything when
4924 * the module is removed
4925 * or the h/w is unplugged.
4926 */
4927 }
4928
4929 rc = scsi_add_host(ap->host, dev);
4930 if (rc) {
4931 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4932 ap->id);
4933 /* FIXME: do something useful here */
4934 /* FIXME: handle unconditional calls to
4935 * scsi_scan_host and ata_host_remove, below,
4936 * at the very least
4937 */
4938 }
4939 }
4940
4941 /* probes are done, now scan each port's disk(s) */
4942 DPRINTK("host probe begin\n");
4943 for (i = 0; i < count; i++) {
4944 struct ata_port *ap = host_set->ports[i];
4945
4946 ata_scsi_scan_host(ap);
4947 }
4948
4949 dev_set_drvdata(dev, host_set);
4950
4951 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4952 return ent->n_ports; /* success */
4953
4954 err_out:
4955 for (i = 0; i < count; i++) {
4956 ata_host_remove(host_set->ports[i], 1);
4957 scsi_host_put(host_set->ports[i]->host);
4958 }
4959 err_free_ret:
4960 kfree(host_set);
4961 VPRINTK("EXIT, returning 0\n");
4962 return 0;
4963 }
4964
4965 /**
4966 * ata_host_set_remove - PCI layer callback for device removal
4967 * @host_set: ATA host set that was removed
4968 *
4969 * Unregister all objects associated with this host set. Free those
4970 * objects.
4971 *
4972 * LOCKING:
4973 * Inherited from calling layer (may sleep).
4974 */
4975
4976 void ata_host_set_remove(struct ata_host_set *host_set)
4977 {
4978 struct ata_port *ap;
4979 unsigned int i;
4980
4981 for (i = 0; i < host_set->n_ports; i++) {
4982 ap = host_set->ports[i];
4983 scsi_remove_host(ap->host);
4984 }
4985
4986 free_irq(host_set->irq, host_set);
4987
4988 for (i = 0; i < host_set->n_ports; i++) {
4989 ap = host_set->ports[i];
4990
4991 ata_scsi_release(ap->host);
4992
4993 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4994 struct ata_ioports *ioaddr = &ap->ioaddr;
4995
4996 if (ioaddr->cmd_addr == 0x1f0)
4997 release_region(0x1f0, 8);
4998 else if (ioaddr->cmd_addr == 0x170)
4999 release_region(0x170, 8);
5000 }
5001
5002 scsi_host_put(ap->host);
5003 }
5004
5005 if (host_set->ops->host_stop)
5006 host_set->ops->host_stop(host_set);
5007
5008 kfree(host_set);
5009 }
5010
5011 /**
5012 * ata_scsi_release - SCSI layer callback hook for host unload
5013 * @host: libata host to be unloaded
5014 *
5015 * Performs all duties necessary to shut down a libata port...
5016 * Kill port kthread, disable port, and release resources.
5017 *
5018 * LOCKING:
5019 * Inherited from SCSI layer.
5020 *
5021 * RETURNS:
5022 * One.
5023 */
5024
5025 int ata_scsi_release(struct Scsi_Host *host)
5026 {
5027 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
5028 int i;
5029
5030 DPRINTK("ENTER\n");
5031
5032 ap->ops->port_disable(ap);
5033 ata_host_remove(ap, 0);
5034 for (i = 0; i < ATA_MAX_DEVICES; i++)
5035 kfree(ap->device[i].id);
5036
5037 DPRINTK("EXIT\n");
5038 return 1;
5039 }
5040
5041 /**
5042 * ata_std_ports - initialize ioaddr with standard port offsets.
5043 * @ioaddr: IO address structure to be initialized
5044 *
5045 * Utility function which initializes data_addr, error_addr,
5046 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5047 * device_addr, status_addr, and command_addr to standard offsets
5048 * relative to cmd_addr.
5049 *
5050 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5051 */
5052
5053 void ata_std_ports(struct ata_ioports *ioaddr)
5054 {
5055 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5056 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5057 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5058 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5059 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5060 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5061 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5062 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5063 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5064 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5065 }
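
/*
 * Illustrative sketch, not compiled (#if 0): a probe routine for a
 * legacy-mode primary channel fills in the command/control block bases
 * and lets ata_std_ports() derive the remaining taskfile registers.
 * my_setup_legacy_primary() is a hypothetical helper.
 */
#if 0
static void my_setup_legacy_primary(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = 0x1f0;	/* legacy primary command block */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = 0x3f6;	/* device control / altstatus */
	ata_std_ports(ioaddr);		/* data..command from cmd_addr */
}
#endif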
5066
5067
5068 #ifdef CONFIG_PCI
5069
5070 void ata_pci_host_stop (struct ata_host_set *host_set)
5071 {
5072 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5073
5074 pci_iounmap(pdev, host_set->mmio_base);
5075 }
5076
5077 /**
5078 * ata_pci_remove_one - PCI layer callback for device removal
5079 * @pdev: PCI device that was removed
5080 *
5081 * PCI layer indicates to libata via this hook that
5082 * hot-unplug or module unload event has occurred.
5083 * Handle this by unregistering all objects associated
5084 * with this PCI device. Free those objects. Then finally
5085 * release PCI resources and disable device.
5086 *
5087 * LOCKING:
5088 * Inherited from PCI layer (may sleep).
5089 */
5090
5091 void ata_pci_remove_one (struct pci_dev *pdev)
5092 {
5093 struct device *dev = pci_dev_to_dev(pdev);
5094 struct ata_host_set *host_set = dev_get_drvdata(dev);
5095
5096 ata_host_set_remove(host_set);
5097 pci_release_regions(pdev);
5098 pci_disable_device(pdev);
5099 dev_set_drvdata(dev, NULL);
5100 }
5101
5102 /* move to PCI subsystem */
5103 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5104 {
5105 unsigned long tmp = 0;
5106
5107 switch (bits->width) {
5108 case 1: {
5109 u8 tmp8 = 0;
5110 pci_read_config_byte(pdev, bits->reg, &tmp8);
5111 tmp = tmp8;
5112 break;
5113 }
5114 case 2: {
5115 u16 tmp16 = 0;
5116 pci_read_config_word(pdev, bits->reg, &tmp16);
5117 tmp = tmp16;
5118 break;
5119 }
5120 case 4: {
5121 u32 tmp32 = 0;
5122 pci_read_config_dword(pdev, bits->reg, &tmp32);
5123 tmp = tmp32;
5124 break;
5125 }
5126
5127 default:
5128 return -EINVAL;
5129 }
5130
5131 tmp &= bits->mask;
5132
5133 return (tmp == bits->val) ? 1 : 0;
5134 }
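
/*
 * Illustrative sketch, not compiled (#if 0): how callers use
 * pci_test_config_bits().  The config-space offset, mask and value
 * below are made up; real drivers take them from their controller's
 * datasheet (e.g. per-channel IDE enable bits).
 */
#if 0
static const struct pci_bits my_enable_bits = {
	0x41U,		/* reg: config space offset to read */
	1U,		/* width: 1 byte */
	0x80UL,		/* mask */
	0x80UL,		/* val: channel enabled when bit 7 is set */
};

static int my_channel_enabled(struct pci_dev *pdev)
{
	return pci_test_config_bits(pdev, &my_enable_bits);
}
#endif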
5135
5136 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5137 {
5138 pci_save_state(pdev);
5139 pci_disable_device(pdev);
5140 pci_set_power_state(pdev, PCI_D3hot);
5141 return 0;
5142 }
5143
5144 int ata_pci_device_resume(struct pci_dev *pdev)
5145 {
5146 pci_set_power_state(pdev, PCI_D0);
5147 pci_restore_state(pdev);
5148 pci_enable_device(pdev);
5149 pci_set_master(pdev);
5150 return 0;
5151 }
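
/*
 * Illustrative sketch, not compiled (#if 0): wiring the generic PCI
 * removal and suspend/resume helpers into a LLD's pci_driver.  The
 * names my_pci_driver, my_pci_tbl and my_init_one are hypothetical.
 */
#if 0
static struct pci_driver my_pci_driver = {
	.name		= "my_lld",
	.id_table	= my_pci_tbl,
	.probe		= my_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif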
5152 #endif /* CONFIG_PCI */
5153
5154
5155 static int __init ata_init(void)
5156 {
5157 ata_wq = create_workqueue("ata");
5158 if (!ata_wq)
5159 return -ENOMEM;
5160
5161 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5162 return 0;
5163 }
5164
5165 static void __exit ata_exit(void)
5166 {
5167 destroy_workqueue(ata_wq);
5168 }
5169
5170 module_init(ata_init);
5171 module_exit(ata_exit);
5172
5173 static unsigned long ratelimit_time;
5174 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5175
5176 int ata_ratelimit(void)
5177 {
5178 int rc;
5179 unsigned long flags;
5180
5181 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5182
5183 if (time_after(jiffies, ratelimit_time)) {
5184 rc = 1;
5185 ratelimit_time = jiffies + (HZ/5);
5186 } else
5187 rc = 0;
5188
5189 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5190
5191 return rc;
5192 }
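
/*
 * Illustrative sketch, not compiled (#if 0): ata_ratelimit() allows at
 * most about five messages per second (one per HZ/5 jiffies), so noisy
 * paths can log without flooding the console.  my_report_spurious_irq()
 * is a hypothetical caller.
 */
#if 0
static void my_report_spurious_irq(struct ata_port *ap)
{
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
}
#endif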
5193
5194 /*
5195 * libata is essentially a library of internal helper functions for
5196 * low-level ATA host controller drivers. As such, the API/ABI is
5197 * likely to change as new drivers are added and updated.
5198 * Do not depend on ABI/API stability.
5199 */
5200
5201 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5202 EXPORT_SYMBOL_GPL(ata_std_ports);
5203 EXPORT_SYMBOL_GPL(ata_device_add);
5204 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5205 EXPORT_SYMBOL_GPL(ata_sg_init);
5206 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5207 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5208 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5209 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5210 EXPORT_SYMBOL_GPL(ata_tf_load);
5211 EXPORT_SYMBOL_GPL(ata_tf_read);
5212 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5213 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5214 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5215 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5216 EXPORT_SYMBOL_GPL(ata_check_status);
5217 EXPORT_SYMBOL_GPL(ata_altstatus);
5218 EXPORT_SYMBOL_GPL(ata_exec_command);
5219 EXPORT_SYMBOL_GPL(ata_port_start);
5220 EXPORT_SYMBOL_GPL(ata_port_stop);
5221 EXPORT_SYMBOL_GPL(ata_host_stop);
5222 EXPORT_SYMBOL_GPL(ata_interrupt);
5223 EXPORT_SYMBOL_GPL(ata_qc_prep);
5224 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5225 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5226 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5227 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5228 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5229 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5230 EXPORT_SYMBOL_GPL(ata_port_probe);
5231 EXPORT_SYMBOL_GPL(sata_phy_reset);
5232 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5233 EXPORT_SYMBOL_GPL(ata_bus_reset);
5234 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5235 EXPORT_SYMBOL_GPL(ata_std_softreset);
5236 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5237 EXPORT_SYMBOL_GPL(ata_std_postreset);
5238 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5239 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5240 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5241 EXPORT_SYMBOL_GPL(ata_port_disable);
5242 EXPORT_SYMBOL_GPL(ata_ratelimit);
5243 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5244 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5245 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5246 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5247 EXPORT_SYMBOL_GPL(ata_scsi_error);
5248 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5249 EXPORT_SYMBOL_GPL(ata_scsi_release);
5250 EXPORT_SYMBOL_GPL(ata_host_intr);
5251 EXPORT_SYMBOL_GPL(ata_dev_classify);
5252 EXPORT_SYMBOL_GPL(ata_id_string);
5253 EXPORT_SYMBOL_GPL(ata_id_c_string);
5254 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5255 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5256 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5257
5258 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5259 EXPORT_SYMBOL_GPL(ata_timing_compute);
5260 EXPORT_SYMBOL_GPL(ata_timing_merge);
5261
5262 #ifdef CONFIG_PCI
5263 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5264 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5265 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5266 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5267 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5268 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5269 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5270 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5271 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5272 #endif /* CONFIG_PCI */
5273
5274 EXPORT_SYMBOL_GPL(ata_device_suspend);
5275 EXPORT_SYMBOL_GPL(ata_device_resume);
5276 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5277 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);