Merge nommu tree
[deliverable/linux.git] / drivers / scsi / libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_dev_xfermask(struct ata_port *ap,
69 struct ata_device *dev);
70
/* source of unique ids handed to ports; starts at 1 */
static unsigned int ata_unique_id = 1;
/* workqueue backing ata_port_queue_task()/ata_port_flush_task() */
static struct workqueue_struct *ata_wq;

/* module parameter: enable discovery of ATAPI devices (read-only in sysfs) */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

/* module parameter "fua": enable use of FUA write commands (read-only in sysfs) */
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
86
87
88 /**
89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
90 * @tf: Taskfile to convert
91 * @fis: Buffer into which data will output
92 * @pmp: Port multiplier port
93 *
94 * Converts a standard ATA taskfile to a Serial ATA
95 * FIS structure (Register - Host to Device).
96 *
97 * LOCKING:
98 * Inherited from caller.
99 */
100
101 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
102 {
103 fis[0] = 0x27; /* Register - Host to Device FIS */
104 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
105 bit 7 indicates Command FIS */
106 fis[2] = tf->command;
107 fis[3] = tf->feature;
108
109 fis[4] = tf->lbal;
110 fis[5] = tf->lbam;
111 fis[6] = tf->lbah;
112 fis[7] = tf->device;
113
114 fis[8] = tf->hob_lbal;
115 fis[9] = tf->hob_lbam;
116 fis[10] = tf->hob_lbah;
117 fis[11] = tf->hob_feature;
118
119 fis[12] = tf->nsect;
120 fis[13] = tf->hob_nsect;
121 fis[14] = 0;
122 fis[15] = tf->ctl;
123
124 fis[16] = 0;
125 fis[17] = 0;
126 fis[18] = 0;
127 fis[19] = 0;
128 }
129
130 /**
131 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
132 * @fis: Buffer from which data will be input
133 * @tf: Taskfile to output
134 *
135 * Converts a serial ATA FIS structure to a standard ATA taskfile.
136 *
137 * LOCKING:
138 * Inherited from caller.
139 */
140
141 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
142 {
143 tf->command = fis[2]; /* status */
144 tf->feature = fis[3]; /* error */
145
146 tf->lbal = fis[4];
147 tf->lbam = fis[5];
148 tf->lbah = fis[6];
149 tf->device = fis[7];
150
151 tf->hob_lbal = fis[8];
152 tf->hob_lbam = fis[9];
153 tf->hob_lbah = fis[10];
154
155 tf->nsect = fis[12];
156 tf->hob_nsect = fis[13];
157 }
158
/*
 * Read/write opcode lookup table used by ata_rwcmd_protocol().
 * Index = protocol base + (fua ? 4 : 0) + (lba48 ? 2 : 0) + (write ? 1 : 0),
 * where the protocol base is 0 for PIO multi-sector, 8 for plain PIO
 * and 16 for DMA.  A zero entry marks an unsupported combination.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
188
189 /**
190 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
191 * @qc: command to examine and configure
192 *
193 * Examine the device configuration and tf->flags to calculate
194 * the proper read/write commands and protocol to use.
195 *
196 * LOCKING:
197 * caller.
198 */
199 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
200 {
201 struct ata_taskfile *tf = &qc->tf;
202 struct ata_device *dev = qc->dev;
203 u8 cmd;
204
205 int index, fua, lba48, write;
206
207 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
208 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
209 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
210
211 if (dev->flags & ATA_DFLAG_PIO) {
212 tf->protocol = ATA_PROT_PIO;
213 index = dev->multi_count ? 0 : 8;
214 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
215 /* Unable to use DMA due to host limitation */
216 tf->protocol = ATA_PROT_PIO;
217 index = dev->multi_count ? 0 : 8;
218 } else {
219 tf->protocol = ATA_PROT_DMA;
220 index = 16;
221 }
222
223 cmd = ata_rw_cmds[index + fua + lba48 + write];
224 if (cmd) {
225 tf->command = cmd;
226 return 0;
227 }
228 return -1;
229 }
230
231 /**
232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
233 * @pio_mask: pio_mask
234 * @mwdma_mask: mwdma_mask
235 * @udma_mask: udma_mask
236 *
237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
238 * unsigned int xfer_mask.
239 *
240 * LOCKING:
241 * None.
242 *
243 * RETURNS:
244 * Packed xfer_mask.
245 */
246 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
249 {
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
253 }
254
255 static const struct ata_xfer_ent {
256 unsigned int shift, bits;
257 u8 base;
258 } ata_xfer_tbl[] = {
259 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
260 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
261 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
262 { -1, },
263 };
264
265 /**
266 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
267 * @xfer_mask: xfer_mask of interest
268 *
269 * Return matching XFER_* value for @xfer_mask. Only the highest
270 * bit of @xfer_mask is considered.
271 *
272 * LOCKING:
273 * None.
274 *
275 * RETURNS:
276 * Matching XFER_* value, 0 if no match found.
277 */
278 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
279 {
280 int highbit = fls(xfer_mask) - 1;
281 const struct ata_xfer_ent *ent;
282
283 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
284 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
285 return ent->base + highbit - ent->shift;
286 return 0;
287 }
288
289 /**
290 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
291 * @xfer_mode: XFER_* of interest
292 *
293 * Return matching xfer_mask for @xfer_mode.
294 *
295 * LOCKING:
296 * None.
297 *
298 * RETURNS:
299 * Matching xfer_mask, 0 if no match found.
300 */
301 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
302 {
303 const struct ata_xfer_ent *ent;
304
305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
306 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
307 return 1 << (ent->shift + xfer_mode - ent->base);
308 return 0;
309 }
310
311 /**
312 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
313 * @xfer_mode: XFER_* of interest
314 *
315 * Return matching xfer_shift for @xfer_mode.
316 *
317 * LOCKING:
318 * None.
319 *
320 * RETURNS:
321 * Matching xfer_shift, -1 if no match found.
322 */
323 static int ata_xfer_mode2shift(unsigned int xfer_mode)
324 {
325 const struct ata_xfer_ent *ent;
326
327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return ent->shift;
330 return -1;
331 }
332
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "&lt;n/a&gt;".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position within the packed xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4",
		"MWDMA0", "MWDMA1", "MWDMA2",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
374
/**
 *	ata_pio_devchk - PATA device presence detection (port I/O variant)
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device echoed the pattern back, 0 otherwise.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns to the sector count / LBA low
	 * shadow registers, finishing with 0x55/0xaa */
	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	/* a present device stores and echoes back the last values */
	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
418
/**
 *	ata_mmio_devchk - PATA device presence detection (MMIO variant)
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device echoed the pattern back, 0 otherwise.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* same pattern dance as ata_pio_devchk(), via memory-mapped I/O */
	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	/* a present device stores and echoes back the last values */
	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
462
463 /**
464 * ata_devchk - PATA device presence detection
465 * @ap: ATA channel to examine
466 * @device: Device to examine (starting at zero)
467 *
468 * Dispatch ATA device presence detection, depending
469 * on whether we are using PIO or MMIO to talk to the
470 * ATA shadow registers.
471 *
472 * LOCKING:
473 * caller.
474 */
475
476 static unsigned int ata_devchk(struct ata_port *ap,
477 unsigned int device)
478 {
479 if (ap->flags & ATA_FLAG_MMIO)
480 return ata_mmio_devchk(ap, device);
481 return ata_pio_devchk(ap, device);
482 }
483
484 /**
485 * ata_dev_classify - determine device type based on ATA-spec signature
486 * @tf: ATA taskfile register set for device to be identified
487 *
488 * Determine from taskfile register contents whether a device is
489 * ATA or ATAPI, as per "Signature and persistence" section
490 * of ATA/PI spec (volume 1, sect 5.14).
491 *
492 * LOCKING:
493 * None.
494 *
495 * RETURNS:
496 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
497 * the event of failure.
498 */
499
500 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
501 {
502 /* Apple's open source Darwin code hints that some devices only
503 * put a proper signature into the LBA mid/high registers,
504 * So, we only check those. It's sufficient for uniqueness.
505 */
506
507 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
508 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
509 DPRINTK("found ATA device by sig\n");
510 return ATA_DEV_ATA;
511 }
512
513 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
514 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
515 DPRINTK("found ATAPI device by sig\n");
516 return ATA_DEV_ATAPI;
517 }
518
519 DPRINTK("unknown device\n");
520 return ATA_DEV_UNKNOWN;
521 }
522
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion (may be NULL)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reads back as the error register */
	if (r_err)
		*r_err = err;

	/* see if device passed diags; 0x01 means success, and 0x81 on
	 * device 0 is also accepted (per ATA diagnostic codes this
	 * indicates device 1 failed -- verify against spec) */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* an "ATA" signature with an all-zero status register is bogus */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
578
579 /**
580 * ata_id_string - Convert IDENTIFY DEVICE page into string
581 * @id: IDENTIFY DEVICE results we will examine
582 * @s: string into which data is output
583 * @ofs: offset into identify device page
584 * @len: length of string to return. must be an even number.
585 *
586 * The strings in the IDENTIFY DEVICE page are broken up into
587 * 16-bit chunks. Run through the string, and output each
588 * 8-bit chunk linearly, regardless of platform.
589 *
590 * LOCKING:
591 * caller.
592 */
593
594 void ata_id_string(const u16 *id, unsigned char *s,
595 unsigned int ofs, unsigned int len)
596 {
597 unsigned int c;
598
599 while (len > 0) {
600 c = id[ofs] >> 8;
601 *s = c;
602 s++;
603
604 c = id[ofs] & 0xff;
605 *s = c;
606 s++;
607
608 ofs++;
609 len -= 2;
610 }
611 }
612
613 /**
614 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
618 * @len: length of string to return. must be an odd number.
619 *
620 * This function is identical to ata_id_string except that it
621 * trims trailing spaces and terminates the resulting string with
622 * null. @len must be actual maximum length (even number) + 1.
623 *
624 * LOCKING:
625 * caller.
626 */
627 void ata_id_c_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
629 {
630 unsigned char *p;
631
632 WARN_ON(!(len & 1));
633
634 ata_id_string(id, s, ofs, len - 1);
635
636 p = s + strnlen(s, len - 1);
637 while (p > s && p[-1] == ' ')
638 p--;
639 *p = '\0';
640 }
641
642 static u64 ata_id_n_sectors(const u16 *id)
643 {
644 if (ata_id_has_lba(id)) {
645 if (ata_id_has_lba48(id))
646 return ata_id_u64(id, 100);
647 else
648 return ata_id_u32(id, 60);
649 } else {
650 if (ata_id_current_chs_valid(id))
651 return ata_id_u32(id, 57);
652 else
653 return id[1] * id[3] * id[6];
654 }
655 }
656
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	by controllers that do not need a device-select step.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
672
673
674 /**
675 * ata_std_dev_select - Select device 0/1 on ATA bus
676 * @ap: ATA channel to manipulate
677 * @device: ATA device (numbered from zero) to select
678 *
679 * Use the method defined in the ATA specification to
680 * make either device 0, or device 1, active on the
681 * ATA channel. Works with both PIO and MMIO.
682 *
683 * May be used as the dev_select() entry in ata_port_operations.
684 *
685 * LOCKING:
686 * caller.
687 */
688
689 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
690 {
691 u8 tmp;
692
693 if (device == 0)
694 tmp = ATA_DEVICE_OBS;
695 else
696 tmp = ATA_DEVICE_OBS | ATA_DEV1;
697
698 if (ap->flags & ATA_FLAG_MMIO) {
699 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
700 } else {
701 outb(tmp, ap->ioaddr.device_addr);
702 }
703 ata_pause(ap); /* needed; also flushes, for mmio */
704 }
705
706 /**
707 * ata_dev_select - Select device 0/1 on ATA bus
708 * @ap: ATA channel to manipulate
709 * @device: ATA device (numbered from zero) to select
710 * @wait: non-zero to wait for Status register BSY bit to clear
711 * @can_sleep: non-zero if context allows sleeping
712 *
713 * Use the method defined in the ATA specification to
714 * make either device 0, or device 1, active on the
715 * ATA channel.
716 *
717 * This is a high-level version of ata_std_dev_select(),
718 * which additionally provides the services of inserting
719 * the proper pauses and status polling, where needed.
720 *
721 * LOCKING:
722 * caller.
723 */
724
725 void ata_dev_select(struct ata_port *ap, unsigned int device,
726 unsigned int wait, unsigned int can_sleep)
727 {
728 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
729 ap->id, device, wait);
730
731 if (wait)
732 ata_wait_idle(ap);
733
734 ap->ops->dev_select(ap, device);
735
736 if (wait) {
737 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
738 msleep(150);
739 ata_wait_idle(ap);
740 }
741 }
742
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (capability, transfer-mode and version words -- see the
 *	ATA IDENTIFY DEVICE data layout for the meaning of each word).
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
781
782 /**
783 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
784 * @id: IDENTIFY data to compute xfer mask from
785 *
786 * Compute the xfermask for this device. This is not as trivial
787 * as it seems if we must consider early devices correctly.
788 *
789 * FIXME: pre IDE drive timing (do we care ?).
790 *
791 * LOCKING:
792 * None.
793 *
794 * RETURNS:
795 * Computed xfermask
796 */
797 static unsigned int ata_id_xfermask(const u16 *id)
798 {
799 unsigned int pio_mask, mwdma_mask, udma_mask;
800
801 /* Usual case. Word 53 indicates word 64 is valid */
802 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
803 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
804 pio_mask <<= 3;
805 pio_mask |= 0x7;
806 } else {
807 /* If word 64 isn't valid then Word 51 high byte holds
808 * the PIO timing number for the maximum. Turn it into
809 * a mask.
810 */
811 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
812
813 /* But wait.. there's more. Design your standards by
814 * committee and you too can get a free iordy field to
815 * process. However its the speeds not the modes that
816 * are supported... Note drivers using the timing API
817 * will get this right anyway
818 */
819 }
820
821 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
822
823 udma_mask = 0;
824 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
825 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
826
827 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
828 }
829
830 /**
831 * ata_port_queue_task - Queue port_task
832 * @ap: The ata_port to queue port_task for
833 *
834 * Schedule @fn(@data) for execution after @delay jiffies using
835 * port_task. There is one port_task per port and it's the
836 * user(low level driver)'s responsibility to make sure that only
837 * one task is active at any given time.
838 *
839 * libata core layer takes care of synchronization between
840 * port_task and EH. ata_port_queue_task() may be ignored for EH
841 * synchronization.
842 *
843 * LOCKING:
844 * Inherited from caller.
845 */
846 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
847 unsigned long delay)
848 {
849 int rc;
850
851 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
852 return;
853
854 PREPARE_WORK(&ap->port_task, fn, data);
855
856 if (!delay)
857 rc = queue_work(ata_wq, &ap->port_task);
858 else
859 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
860
861 /* rc == 0 means that another user is using port task */
862 WARN_ON(rc == 0);
863 }
864
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* raise the flag under the lock so a running task observes it
	 * and stops requeueing itself */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed: the work may already be running; flush
		 * the queue once more to wait for it */
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}
904
905 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
906 {
907 struct completion *waiting = qc->private_data;
908
909 qc->ap->ops->tf_read(qc->ap, &qc->tf);
910 complete(waiting);
911 }
912
/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	The command's final err_mask (0 on success).
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	/* completion handler wakes us via &wait */
	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);	/* issue failed; complete immediately */

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	/* hand the result taskfile and err_mask back to the caller */
	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	return err_mask;
}
992
993 /**
994 * ata_pio_need_iordy - check if iordy needed
995 * @adev: ATA device
996 *
997 * Check if the current speed of the device requires IORDY. Used
998 * by various controllers for chip configuration.
999 */
1000
1001 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1002 {
1003 int pio;
1004 int speed = adev->pio_mode - XFER_PIO_0;
1005
1006 if (speed < 2)
1007 return 0;
1008 if (speed > 2)
1009 return 1;
1010
1011 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1012
1013 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1014 pio = adev->id[ATA_ID_EIDE_PIO];
1015 /* Is the speed faster than the drive allows non IORDY ? */
1016 if (pio) {
1017 /* This is cycle times not frequency - watch the logic! */
1018 if (pio > 240) /* PIO2 is 240nS per cycle */
1019 return 1;
1020 return 0;
1021 }
1022 }
1023 return 0;
1024 }
1025
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated; caller must kfree)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also takes care of EDD signature
 *	misreporting (to be removed once EDD support is gone) and
 *	issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;	/* human-readable failure cause for err_out */
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* EDD is only in play when the port has neither a probe_reset
	 * method nor SRST/SATA reset capability */
	if (ap->ops->probe_reset ||
	    ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	/* pick the IDENTIFY flavor for the (possibly updated) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";

		/* anything beyond a device error is fatal here */
		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	/* IDENTIFY words arrive little-endian; fix up for the CPU */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: class must agree with what the data reports */
	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}
1167
1168 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1169 struct ata_device *dev)
1170 {
1171 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1172 }
1173
1174 /**
1175 * ata_dev_configure - Configure the specified ATA/ATAPI device
1176 * @ap: Port on which target device resides
1177 * @dev: Target device to configure
1178 * @print_info: Enable device info printout
1179 *
1180 * Configure @dev according to @dev->id. Generic and low-level
1181 * driver specific fixups are also applied.
1182 *
1183 * LOCKING:
1184 * Kernel thread context (may sleep)
1185 *
1186 * RETURNS:
1187 * 0 on success, -errno otherwise
1188 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	/* Nothing to do for an absent device. */
	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities (selected IDENTIFY words) */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters -- everything below is
	 * (re)derived from the IDENTIFY data, so clear stale state first
	 */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation (IDENTIFY words 1/3/6) */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid
				 * (words 54-56 override the defaults).
				 */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		/* NOTE(review): ATA devices are given a 16-byte CDB length,
		 * presumably for the SCSI translation layer -- confirm
		 * against the libata-scsi callers.
		 */
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		/* ATAPI devices report their own CDB length; reject
		 * anything outside the 12..ATAPI_CDB_LEN range.
		 */
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	/* The SCSI host's max CDB length is the maximum over all
	 * devices on this port.
	 */
	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		ap->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the low-level driver a chance to apply its own fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, dev->devno);
	DPRINTK("EXIT, err\n");
	return rc;
}
1324
1325 /**
1326 * ata_bus_probe - Reset and probe ATA bus
1327 * @ap: Bus to probe
1328 *
1329 * Master ATA bus probing function. Initiates a hardware-dependent
1330 * bus reset, then attempts to identify any devices found on
1331 * the bus.
1332 *
1333 * LOCKING:
1334 * PCI/etc. bus probe sem.
1335 *
1336 * RETURNS:
1337 * Zero on success, non-zero on error.
1338 */
1339
1340 static int ata_bus_probe(struct ata_port *ap)
1341 {
1342 unsigned int classes[ATA_MAX_DEVICES];
1343 unsigned int i, rc, found = 0;
1344
1345 ata_port_probe(ap);
1346
1347 /* reset and determine device classes */
1348 for (i = 0; i < ATA_MAX_DEVICES; i++)
1349 classes[i] = ATA_DEV_UNKNOWN;
1350
1351 if (ap->ops->probe_reset) {
1352 rc = ap->ops->probe_reset(ap, classes);
1353 if (rc) {
1354 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1355 return rc;
1356 }
1357 } else {
1358 ap->ops->phy_reset(ap);
1359
1360 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1361 for (i = 0; i < ATA_MAX_DEVICES; i++)
1362 classes[i] = ap->device[i].class;
1363
1364 ata_port_probe(ap);
1365 }
1366
1367 for (i = 0; i < ATA_MAX_DEVICES; i++)
1368 if (classes[i] == ATA_DEV_UNKNOWN)
1369 classes[i] = ATA_DEV_NONE;
1370
1371 /* read IDENTIFY page and configure devices */
1372 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1373 struct ata_device *dev = &ap->device[i];
1374
1375 dev->class = classes[i];
1376
1377 if (!ata_dev_present(dev))
1378 continue;
1379
1380 WARN_ON(dev->id != NULL);
1381 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1382 dev->class = ATA_DEV_NONE;
1383 continue;
1384 }
1385
1386 if (ata_dev_configure(ap, dev, 1)) {
1387 dev->class++; /* disable device */
1388 continue;
1389 }
1390
1391 found = 1;
1392 }
1393
1394 if (!found)
1395 goto err_out_disable;
1396
1397 ata_set_mode(ap);
1398 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1399 goto err_out_disable;
1400
1401 return 0;
1402
1403 err_out_disable:
1404 ap->ops->port_disable(ap);
1405 return -1;
1406 }
1407
1408 /**
1409 * ata_port_probe - Mark port as enabled
1410 * @ap: Port for which we indicate enablement
1411 *
1412 * Modify @ap data structure such that the system
1413 * thinks that the entire port is enabled.
1414 *
1415 * LOCKING: host_set lock, or some other form of
1416 * serialization.
1417 */
1418
void ata_port_probe(struct ata_port *ap)
{
	/* Clearing this flag marks the whole port usable again. */
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
1423
1424 /**
1425 * sata_print_link_status - Print SATA link status
1426 * @ap: SATA port to printk link status about
1427 *
1428 * This function prints link speed and status of a SATA link.
1429 *
1430 * LOCKING:
1431 * None.
1432 */
1433 static void sata_print_link_status(struct ata_port *ap)
1434 {
1435 u32 sstatus, tmp;
1436 const char *speed;
1437
1438 if (!ap->ops->scr_read)
1439 return;
1440
1441 sstatus = scr_read(ap, SCR_STATUS);
1442
1443 if (sata_dev_present(ap)) {
1444 tmp = (sstatus >> 4) & 0xf;
1445 if (tmp & (1 << 0))
1446 speed = "1.5";
1447 else if (tmp & (1 << 1))
1448 speed = "3.0";
1449 else
1450 speed = "<unknown>";
1451 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1452 ap->id, speed, sstatus);
1453 } else {
1454 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1455 ap->id, sstatus);
1456 }
1457 }
1458
1459 /**
1460 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1461 * @ap: SATA port associated with target SATA PHY.
1462 *
1463 * This function issues commands to standard SATA Sxxx
1464 * PHY registers, to wake up the phy (and device), and
1465 * clear any reset condition.
1466 *
1467 * LOCKING:
1468 * PCI/etc. bus probe sem.
1469 *
1470 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (SControl DET = 1) */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary; SStatus DET == 1
	 * means a device is detected but the phy is not yet up
	 */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	/* wait for the device behind the phy to drop BSY; on timeout
	 * give up on the whole port
	 */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1512
1513 /**
1514 * sata_phy_reset - Reset SATA bus.
1515 * @ap: SATA port associated with target SATA PHY.
1516 *
1517 * This function resets the SATA bus, and then probes
1518 * the bus for devices.
1519 *
1520 * LOCKING:
1521 * PCI/etc. bus probe sem.
1522 *
1523 */
1524 void sata_phy_reset(struct ata_port *ap)
1525 {
1526 __sata_phy_reset(ap);
1527 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1528 return;
1529 ata_bus_reset(ap);
1530 }
1531
1532 /**
1533 * ata_port_disable - Disable port.
1534 * @ap: Port to be disabled.
1535 *
1536 * Modify @ap data structure such that the system
1537 * thinks that the entire port is disabled, and should
1538 * never attempt to probe or communicate with devices
1539 * on this port.
1540 *
1541 * LOCKING: host_set lock, or some other form of
1542 * serialization.
1543 */
1544
1545 void ata_port_disable(struct ata_port *ap)
1546 {
1547 ap->device[0].class = ATA_DEV_NONE;
1548 ap->device[1].class = ATA_DEV_NONE;
1549 ap->flags |= ATA_FLAG_PORT_DISABLED;
1550 }
1551
1552 /*
1553 * This mode timing computation functionality is ported over from
1554 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1555 */
1556 /*
1557 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1558 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1559 * for PIO 5, which is a nonstandard extension and UDMA6, which
1560 * is currently supported only by Maxtor drives.
1561 */
1562
static const struct ata_timing ata_timing[] = {

	/* Columns (positional, presumably matching struct ata_timing's
	 * field order as used by ata_timing_quantize() -- verify against
	 * the struct declaration):
	 *   mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
	 * All values in nanoseconds per the ATA/ATAPI-6 comment above.
	 */
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

/*	{ XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	/* sentinel: terminates table scans in ata_timing_find_mode() */
	{ 0xFF }
};
1596
/* Round v up to a whole number of 'unit's (assumes v > 0). */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
/* Like ENOUGH, but zero stays zero ("unspecified" timing field).
 * NOTE: evaluates v twice -- only pass side-effect-free expressions.
 */
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1599
1600 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1601 {
1602 q->setup = EZ(t->setup * 1000, T);
1603 q->act8b = EZ(t->act8b * 1000, T);
1604 q->rec8b = EZ(t->rec8b * 1000, T);
1605 q->cyc8b = EZ(t->cyc8b * 1000, T);
1606 q->active = EZ(t->active * 1000, T);
1607 q->recover = EZ(t->recover * 1000, T);
1608 q->cycle = EZ(t->cycle * 1000, T);
1609 q->udma = EZ(t->udma * 1000, UT);
1610 }
1611
1612 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1613 struct ata_timing *m, unsigned int what)
1614 {
1615 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1616 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1617 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1618 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1619 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1620 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1621 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1622 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1623 }
1624
1625 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1626 {
1627 const struct ata_timing *t;
1628
1629 for (t = ata_timing; t->mode != speed; t++)
1630 if (t->mode == 0xFF)
1631 return NULL;
1632 return t;
1633 }
1634
/* Compute the final bus-clock timing for @adev at transfer mode @speed,
 * merging the standard table values with any drive-reported EIDE
 * timings and (for DMA modes) the drive's PIO timing.  @T and @UT are
 * the clock quanta passed through to ata_timing_quantize().
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* PIO modes <= 2 use the non-IORDY cycle word */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table vs. drive-reported cycles */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		/* recurse once for the drive's current PIO mode */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		/* split the slack evenly, remainder goes to recovery */
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
1699
/* Program @dev's chosen transfer mode into the device itself and
 * revalidate it; disables the whole port if revalidation fails.
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* tell the device about its new transfer mode */
	ata_dev_set_xfermode(ap, dev);

	/* make sure the device is still the same one and still sane
	 * after the mode change; if not, give up on the port
	 */
	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
}
1723
1724 static int ata_host_set_pio(struct ata_port *ap)
1725 {
1726 int i;
1727
1728 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1729 struct ata_device *dev = &ap->device[i];
1730
1731 if (!ata_dev_present(dev))
1732 continue;
1733
1734 if (!dev->pio_mode) {
1735 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1736 return -1;
1737 }
1738
1739 dev->xfer_mode = dev->pio_mode;
1740 dev->xfer_shift = ATA_SHIFT_PIO;
1741 if (ap->ops->set_piomode)
1742 ap->ops->set_piomode(ap, dev);
1743 }
1744
1745 return 0;
1746 }
1747
1748 static void ata_host_set_dma(struct ata_port *ap)
1749 {
1750 int i;
1751
1752 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1753 struct ata_device *dev = &ap->device[i];
1754
1755 if (!ata_dev_present(dev) || !dev->dma_mode)
1756 continue;
1757
1758 dev->xfer_mode = dev->dma_mode;
1759 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1760 if (ap->ops->set_dmamode)
1761 ap->ops->set_dmamode(ap, dev);
1762 }
1763 }
1764
1765 /**
1766 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1767 * @ap: port on which timings will be programmed
1768 *
1769 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1770 *
1771 * LOCKING:
1772 * PCI/etc. bus probe sem.
1773 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc;

	/* step 1: calculate xfer_mask -- pick the best PIO and DMA
	 * modes each present device can use on this port
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int xfer_mask;

		if (!ata_dev_present(dev))
			continue;

		xfer_mask = ata_dev_xfermask(ap, dev);

		dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
		dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
								ATA_MASK_UDMA));
	}

	/* step 2: always set host PIO timings; failure here disables
	 * the whole port (err_out)
	 */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings (best effort, no error return) */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode (SET FEATURES + revalidate) */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	/* step 4 may have disabled the port; don't run the hook then */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
1816
1817 /**
1818 * ata_tf_to_host - issue ATA taskfile to host controller
1819 * @ap: port to which command is being issued
1820 * @tf: ATA taskfile register set
1821 *
1822 * Issues ATA taskfile register set to ATA host controller,
1823 * with proper synchronization with interrupt handler and
1824 * other threads.
1825 *
1826 * LOCKING:
1827 * spin_lock_irqsave(host_set lock)
1828 */
1829
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* Load the taskfile registers first, then write the command
	 * register to start execution.
	 */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
1836
1837 /**
1838 * ata_busy_sleep - sleep until BSY clears, or timeout
1839 * @ap: port containing status register to be polled
1840 * @tmout_pat: impatience timeout
1841 * @tmout: overall timeout
1842 *
1843 * Sleep until ATA Status register bit BSY clears,
1844 * or a timeout occurs.
1845 *
1846 * LOCKING: None.
1847 */
1848
1849 unsigned int ata_busy_sleep (struct ata_port *ap,
1850 unsigned long tmout_pat, unsigned long tmout)
1851 {
1852 unsigned long timer_start, timeout;
1853 u8 status;
1854
1855 status = ata_busy_wait(ap, ATA_BUSY, 300);
1856 timer_start = jiffies;
1857 timeout = timer_start + tmout_pat;
1858 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1859 msleep(50);
1860 status = ata_busy_wait(ap, ATA_BUSY, 3);
1861 }
1862
1863 if (status & ATA_BUSY)
1864 printk(KERN_WARNING "ata%u is slow to respond, "
1865 "please be patient\n", ap->id);
1866
1867 timeout = timer_start + tmout;
1868 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1869 msleep(50);
1870 status = ata_chk_status(ap);
1871 }
1872
1873 if (status & ATA_BUSY) {
1874 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1875 ap->id, tmout / HZ);
1876 return 1;
1877 }
1878
1879 return 0;
1880 }
1881
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		/* nsect == lbal == 1 is the post-reset register
		 * signature -- presumably per the ATA reset protocol;
		 * it means dev1 is responding to register reads
		 */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			/* dev1 never answered; stop waiting for it */
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
1928
1929 /**
1930 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1931 * @ap: Port to reset and probe
1932 *
1933 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1934 * probe the bus. Not often used these days.
1935 *
1936 * LOCKING:
1937 * PCI/etc. bus probe sem.
1938 * Obtains host_set lock.
1939 *
1940 */
1941
static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset -- host_set lock held only around the register
	 * writes, not the waits below
	 */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms. but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	/* non-zero return (BSY never cleared) means failure */
	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
1967
/* Perform an ATA software reset (SRST pulse on the device control
 * register) and wait for the devices in @devmask to come back.
 * Returns 0 on success, non-zero on failure.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset. causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);


	/* Before we perform post reset processing we want to see if
	   the bus shows 0xFF because the odd clown forgets the D7 pulldown
	   resistor */

	if (ata_check_status(ap) == 0xFF)
		return 1;	/* Positive is failure for some reason */

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2014
2015 /**
2016 * ata_bus_reset - reset host port and associated ATA channel
2017 * @ap: port to reset
2018 *
2019 * This is typically the first time we actually start issuing
2020 * commands to the ATA channel. We wait for BSY to clear, then
2021 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2022 * result. Determine what devices, if any, are on the channel
2023 * by looking at the device 0/1 error register. Look at the signature
2024 * stored in each device's taskfile registers, to determine if
2025 * the device is ATA or ATAPI.
2026 *
2027 * LOCKING:
2028 * PCI/etc. bus probe sem.
2029 * Obtains host_set lock.
2030 *
2031 * SIDE EFFECTS:
2032 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2033 */
2034
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present; on a SATA-reset port
	 * we assume device 0 exists without probing
	 */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset: SRST if supported, otherwise fall back to
	 * EXECUTE DEVICE DIAGNOSTIC (unless SATA reset already done)
	 */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 * (err == 0x81 presumably means device 1 failed diagnostics --
	 * skip classifying it then)
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2115
2116 static int sata_phy_resume(struct ata_port *ap)
2117 {
2118 unsigned long timeout = jiffies + (HZ * 5);
2119 u32 sstatus;
2120
2121 scr_write_flush(ap, SCR_CONTROL, 0x300);
2122
2123 /* Wait for phy to become ready, if necessary. */
2124 do {
2125 msleep(200);
2126 sstatus = scr_read(ap, SCR_STATUS);
2127 if ((sstatus & 0xf) != 1)
2128 return 0;
2129 } while (time_before(jiffies, timeout));
2130
2131 return -1;
2132 }
2133
2134 /**
2135 * ata_std_probeinit - initialize probing
2136 * @ap: port to be probed
2137 *
2138 * @ap is about to be probed. Initialize it. This function is
2139 * to be used as standard callback for ata_drive_probe_reset().
2140 *
2141 * NOTE!!! Do not use this function as probeinit if a low level
2142 * driver implements only hardreset. Just pass NULL as probeinit
2143 * in that case. Using this function is probably okay but doing
2144 * so makes reset sequence different from the original
2145 * ->phy_reset implementation and Jeff nervous. :-P
2146 */
2147 extern void ata_std_probeinit(struct ata_port *ap)
2148 {
2149 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2150 sata_phy_resume(ap);
2151 if (sata_dev_present(ap))
2152 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2153 }
2154 }
2155
2156 /**
2157 * ata_std_softreset - reset host port via ATA SRST
2158 * @ap: port to reset
2159 * @verbose: fail verbosely
2160 * @classes: resulting classes of attached devices
2161 *
2162 * Reset host port using ATA SRST. This function is to be used
2163 * as standard callback for ata_drive_*_reset() functions.
2164 *
2165 * LOCKING:
2166 * Kernel thread context (may sleep)
2167 *
2168 * RETURNS:
2169 * 0 on success, -errno otherwise.
2170 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* SATA port with no device on the link: nothing to reset */
	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		if (verbose)
			printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
			       ap->id, err_mask);
		else
			DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * err == 0x81 presumably means device 1 failed diagnostics
	 */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2215
2216 /**
2217 * sata_std_hardreset - reset host port via SATA phy reset
2218 * @ap: port to reset
2219 * @verbose: fail verbosely
2220 * @class: resulting class of attached device
2221 *
2222 * SATA phy-reset host port using DET bits of SControl register.
2223 * This function is to be used as standard callback for
2224 * ata_drive_*_reset().
2225 *
2226 * LOCKING:
2227 * Kernel thread context (may sleep)
2228 *
2229 * RETURNS:
2230 * 0 on success, -errno otherwise.
2231 */
int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
{
	DPRINTK("ENTER\n");

	/* Issue phy wake/reset (SControl DET = 1) */
	scr_write_flush(ap, SCR_CONTROL, 0x301);

	/*
	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* Bring phy back (clears DET, waits for the link) */
	sata_phy_resume(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!sata_dev_present(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* link is up but the device never dropped BSY -> hard failure */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		if (verbose)
			printk(KERN_ERR "ata%u: COMRESET failed "
			       "(device not ready)\n", ap->id);
		else
			DPRINTK("EXIT, device not ready\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
2271
2272 /**
2273 * ata_std_postreset - standard postreset callback
2274 * @ap: the target ata_port
2275 * @classes: classes of attached devices
2276 *
2277 * This function is invoked after a successful reset. Note that
2278 * the device might have been reset more than once using
2279 * different reset methods before postreset is invoked.
2280 *
2281 * This function is to be used as standard callback for
2282 * ata_drive_*_reset().
2283 *
2284 * LOCKING:
2285 * Kernel thread context (may sleep)
2286 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	DPRINTK("ENTER\n");

	/* set cable type if it isn't already set */
	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* print link status */
	if (ap->cbl == ATA_CBL_SATA)
		sata_print_link_status(ap);

	/* re-enable interrupts (reset may have run with them masked) */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control: rewrite the cached ctl value via
	 * MMIO or port I/O depending on how the port is mapped
	 */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		else
			outb(ap->ctl, ap->ioaddr.ctl_addr);
	}

	DPRINTK("EXIT\n");
}
2325
2326 /**
2327 * ata_std_probe_reset - standard probe reset method
 * @ap: port to perform probe-reset
2329 * @classes: resulting classes of attached devices
2330 *
2331 * The stock off-the-shelf ->probe_reset method.
2332 *
2333 * LOCKING:
2334 * Kernel thread context (may sleep)
2335 *
2336 * RETURNS:
2337 * 0 on success, -errno otherwise.
2338 */
2339 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2340 {
2341 ata_reset_fn_t hardreset;
2342
2343 hardreset = NULL;
2344 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2345 hardreset = sata_std_hardreset;
2346
2347 return ata_drive_probe_reset(ap, ata_std_probeinit,
2348 ata_std_softreset, hardreset,
2349 ata_std_postreset, classes);
2350 }
2351
2352 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2353 ata_postreset_fn_t postreset,
2354 unsigned int *classes)
2355 {
2356 int i, rc;
2357
2358 for (i = 0; i < ATA_MAX_DEVICES; i++)
2359 classes[i] = ATA_DEV_UNKNOWN;
2360
2361 rc = reset(ap, 0, classes);
2362 if (rc)
2363 return rc;
2364
2365 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2366 * is complete and convert all ATA_DEV_UNKNOWN to
2367 * ATA_DEV_NONE.
2368 */
2369 for (i = 0; i < ATA_MAX_DEVICES; i++)
2370 if (classes[i] != ATA_DEV_UNKNOWN)
2371 break;
2372
2373 if (i < ATA_MAX_DEVICES)
2374 for (i = 0; i < ATA_MAX_DEVICES; i++)
2375 if (classes[i] == ATA_DEV_UNKNOWN)
2376 classes[i] = ATA_DEV_NONE;
2377
2378 if (postreset)
2379 postreset(ap, classes);
2380
2381 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2382 }
2383
2384 /**
2385 * ata_drive_probe_reset - Perform probe reset with given methods
2386 * @ap: port to reset
2387 * @probeinit: probeinit method (can be NULL)
2388 * @softreset: softreset method (can be NULL)
2389 * @hardreset: hardreset method (can be NULL)
2390 * @postreset: postreset method (can be NULL)
2391 * @classes: resulting classes of attached devices
2392 *
2393 * Reset the specified port and classify attached devices using
2394 * given methods. This function prefers softreset but tries all
2395 * possible reset sequences to reset and classify devices. This
2396 * function is intended to be used for constructing ->probe_reset
2397 * callback by low level drivers.
2398 *
2399 * Reset methods should follow the following rules.
2400 *
 * - Return 0 on success, -errno on failure.
2402 * - If classification is supported, fill classes[] with
2403 * recognized class codes.
2404 * - If classification is not supported, leave classes[] alone.
2405 * - If verbose is non-zero, print error message on failure;
2406 * otherwise, shut up.
2407 *
2408 * LOCKING:
2409 * Kernel thread context (may sleep)
2410 *
2411 * RETURNS:
 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2413 * if classification fails, and any error code from reset
2414 * methods.
2415 */
2416 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2417 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2418 ata_postreset_fn_t postreset, unsigned int *classes)
2419 {
2420 int rc = -EINVAL;
2421
2422 if (probeinit)
2423 probeinit(ap);
2424
2425 if (softreset) {
2426 rc = do_probe_reset(ap, softreset, postreset, classes);
2427 if (rc == 0)
2428 return 0;
2429 }
2430
2431 if (!hardreset)
2432 return rc;
2433
2434 rc = do_probe_reset(ap, hardreset, postreset, classes);
2435 if (rc == 0 || rc != -ENODEV)
2436 return rc;
2437
2438 if (softreset)
2439 rc = do_probe_reset(ap, softreset, postreset, classes);
2440
2441 return rc;
2442 }
2443
2444 /**
2445 * ata_dev_same_device - Determine whether new ID matches configured device
2446 * @ap: port on which the device to compare against resides
2447 * @dev: device to compare against
2448 * @new_class: class of the new device
2449 * @new_id: IDENTIFY page of the new device
2450 *
2451 * Compare @new_class and @new_id against @dev and determine
2452 * whether @dev is the device indicated by @new_class and
2453 * @new_id.
2454 *
2455 * LOCKING:
2456 * None.
2457 *
2458 * RETURNS:
2459 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2460 */
static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
			       unsigned int new_class, const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][41], serial[2][21];
	u64 new_n_sectors;

	/* class must match first; a class change means a new device */
	if (dev->class != new_class) {
		printk(KERN_INFO
		       "ata%u: dev %u class mismatch %d != %d\n",
		       ap->id, dev->devno, dev->class, new_class);
		return 0;
	}

	/* pull model and serial strings from both IDENTIFY pages;
	 * index 0 is the old page, index 1 the new one
	 */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		printk(KERN_INFO
		       "ata%u: dev %u model number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		printk(KERN_INFO
		       "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, serial[0], serial[1]);
		return 0;
	}

	/* capacity is only compared for ATA disks */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		printk(KERN_INFO
		       "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
		       ap->id, dev->devno, (unsigned long long)dev->n_sectors,
		       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}
2505
2506 /**
2507 * ata_dev_revalidate - Revalidate ATA device
2508 * @ap: port on which the device to revalidate resides
2509 * @dev: device to revalidate
2510 * @post_reset: is this revalidation after reset?
2511 *
2512 * Re-read IDENTIFY page and make sure @dev is still attached to
2513 * the port.
2514 *
2515 * LOCKING:
2516 * Kernel thread context (may sleep)
2517 *
2518 * RETURNS:
2519 * 0 on success, negative errno otherwise
2520 */
int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
		       int post_reset)
{
	unsigned int class;
	u16 *id;
	int rc;

	if (!ata_dev_present(dev))
		return -ENODEV;

	class = dev->class;
	id = NULL;

	/* allocate & read ID data (ata_dev_read_id allocates @id;
	 * on success we own it and must free it or hand it to @dev)
	 */
	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(ap, dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* same device confirmed: swap in the freshly-read IDENTIFY page */
	kfree(dev->id);
	dev->id = id;

	/* configure device according to the new ID */
	return ata_dev_configure(ap, dev, 0);

 fail:
	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
	       ap->id, dev->devno, rc);
	kfree(id);
	return rc;
}
2557
/* Devices with broken DMA, stored as { model, revision } pairs and
 * scanned two entries at a time by ata_dma_blacklisted().  A NULL
 * revision blacklists every firmware revision of that model.
 */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B", NULL,
	"CRD-8400B", NULL,
	"CRD-8480B", NULL,
	"CRD-8482B", NULL,
	"CRD-84", NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"CD-532E-A", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,	/* NOTE(review): duplicate of entry above */
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
};
2590
/* Trim trailing blanks from the first @len bytes of @s in place.
 * ATAPI specifies that unused ID-string space is blank-filled, so the
 * padding carries no information.  Returns the trimmed length.
 */
static int ata_strim(char *s, size_t len)
{
	size_t n = 0;

	/* length of the string, capped at @len (strnlen semantics) */
	while (n < len && s[n] != '\0')
		n++;

	/* drop trailing blanks, terminating as we go */
	while (n > 0 && s[n - 1] == ' ')
		s[--n] = '\0';

	return n;
}
2602
2603 static int ata_dma_blacklisted(const struct ata_device *dev)
2604 {
2605 unsigned char model_num[40];
2606 unsigned char model_rev[16];
2607 unsigned int nlen, rlen;
2608 int i;
2609
2610 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2611 sizeof(model_num));
2612 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2613 sizeof(model_rev));
2614 nlen = ata_strim(model_num, sizeof(model_num));
2615 rlen = ata_strim(model_rev, sizeof(model_rev));
2616
2617 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2618 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2619 if (ata_dma_blacklist[i+1] == NULL)
2620 return 1;
2621 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2622 return 1;
2623 }
2624 }
2625 return 0;
2626 }
2627
2628 /**
2629 * ata_dev_xfermask - Compute supported xfermask of the given device
2630 * @ap: Port on which the device to compute xfermask for resides
2631 * @dev: Device to compute xfermask for
2632 *
2633 * Compute supported xfermask of @dev. This function is
2634 * responsible for applying all known limits including host
2635 * controller limits, device blacklist, etc...
2636 *
2637 * LOCKING:
2638 * None.
2639 *
2640 * RETURNS:
2641 * Computed xfermask.
2642 */
static unsigned int ata_dev_xfermask(struct ata_port *ap,
				     struct ata_device *dev)
{
	unsigned long xfer_mask;
	int i;

	/* start from what the host controller itself supports */
	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
				      ap->udma_mask);

	/* use port-wide xfermask for now: intersect with every present
	 * device's capabilities, clearing DMA bits for blacklisted ones
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *d = &ap->device[i];
		if (!ata_dev_present(d))
			continue;
		xfer_mask &= ata_id_xfermask(d->id);
		if (ata_dma_blacklisted(d))
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	}

	/* the mask was already clamped in the loop; warn only for @dev */
	if (ata_dma_blacklisted(dev))
		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
		       "disabling DMA\n", ap->id, dev->devno);

	return xfer_mask;
}
2668
2669 /**
2670 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2671 * @ap: Port associated with device @dev
2672 * @dev: Device to which command will be sent
2673 *
2674 * Issue SET FEATURES - XFER MODE command to device @dev
2675 * on port @ap.
2676 *
2677 * LOCKING:
2678 * PCI/etc. bus probe sem.
2679 */
2680
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
	struct ata_taskfile tf;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;	/* sector count field carries the mode */

	/* a failed SET FEATURES leaves the device's transfer mode in an
	 * unknown state; disable the whole port rather than risk it
	 */
	if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
		printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
		       ap->id);
		ata_port_disable(ap);
	}

	DPRINTK("EXIT\n");
}
2703
2704 /**
2705 * ata_dev_init_params - Issue INIT DEV PARAMS command
2706 * @ap: Port associated with device @dev
2707 * @dev: Device to which command will be sent
2708 *
2709 * LOCKING:
2710 * Kernel thread context (may sleep)
2711 *
2712 * RETURNS:
2713 * 0 on success, AC_ERR_* mask otherwise.
2714 */
2715
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	u16 sectors = dev->id[6];	/* IDENTIFY word 6: sectors per track */
	u16 heads = dev->id[3];	/* IDENTIFY word 3: number of heads */

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return 0;	/* geometry out of range; silently skip */

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
2743
2744 /**
2745 * ata_sg_clean - Unmap DMA memory associated with command
2746 * @qc: Command containing DMA memory to be released
2747 *
2748 * Unmap all mapped DMA memory associated with this command.
2749 *
2750 * LOCKING:
2751 * spin_lock_irqsave(host_set lock)
2752 */
2753
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	/* only commands that were actually DMA-mapped may come here */
	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		/* restore last sg (length trimmed during ata_sg_setup) */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			/* copy the received pad bytes back to the caller's page */
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->host_set->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
2802
2803 /**
2804 * ata_fill_sg - Fill PCI IDE PRD table
2805 * @qc: Metadata associated with taskfile to be transferred
2806 *
2807 * Fill PCI IDE PRD (scatter-gather) table with segments
2808 * associated with the current disk command.
2809 *
2810 * LOCKING:
2811 * spin_lock_irqsave(host_set lock)
2812 *
2813 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split the element so no single PRD crosses 64K */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* NOTE(review): len & 0xffff encodes 0x10000 as 0,
			 * presumably the hardware's "64K" length encoding --
			 * confirm against the controller's PRD documentation
			 */
			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2854 /**
2855 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2856 * @qc: Metadata associated with taskfile to check
2857 *
2858 * Allow low-level driver to filter ATA PACKET commands, returning
2859 * a status indicating whether or not it is OK to use DMA for the
2860 * supplied PACKET command.
2861 *
2862 * LOCKING:
2863 * spin_lock_irqsave(host_set lock)
2864 *
2865 * RETURNS: 0 when ATAPI DMA can be used
2866 * nonzero otherwise
2867 */
2868 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2869 {
2870 struct ata_port *ap = qc->ap;
2871 int rc = 0; /* Assume ATAPI DMA is OK by default */
2872
2873 if (ap->ops->check_atapi_dma)
2874 rc = ap->ops->check_atapi_dma(qc);
2875
2876 return rc;
2877 }
2878 /**
2879 * ata_qc_prep - Prepare taskfile for submission
2880 * @qc: Metadata associated with taskfile to be prepared
2881 *
2882 * Prepare ATA taskfile for submission.
2883 *
2884 * LOCKING:
2885 * spin_lock_irqsave(host_set lock)
2886 */
2887 void ata_qc_prep(struct ata_queued_cmd *qc)
2888 {
2889 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2890 return;
2891
2892 ata_fill_sg(qc);
2893 }
2894
/* no-op ->qc_prep hook for controllers that need no PRD/SG preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2896
2897 /**
2898 * ata_sg_init_one - Associate command with memory buffer
2899 * @qc: Command to be associated
2900 * @buf: Memory buffer
2901 * @buflen: Length of memory buffer, in bytes.
2902 *
2903 * Initialize the data-related elements of queued_cmd @qc
2904 * to point to a single memory buffer, @buf of byte length @buflen.
2905 *
2906 * LOCKING:
2907 * spin_lock_irqsave(host_set lock)
2908 */
2909
2910 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2911 {
2912 struct scatterlist *sg;
2913
2914 qc->flags |= ATA_QCFLAG_SINGLE;
2915
2916 memset(&qc->sgent, 0, sizeof(qc->sgent));
2917 qc->__sg = &qc->sgent;
2918 qc->n_elem = 1;
2919 qc->orig_n_elem = 1;
2920 qc->buf_virt = buf;
2921
2922 sg = qc->__sg;
2923 sg_init_one(sg, buf, buflen);
2924 }
2925
2926 /**
2927 * ata_sg_init - Associate command with scatter-gather table.
2928 * @qc: Command to be associated
2929 * @sg: Scatter-gather table.
2930 * @n_elem: Number of elements in s/g table.
2931 *
2932 * Initialize the data-related elements of queued_cmd @qc
2933 * to point to a scatter-gather table @sg, containing @n_elem
2934 * elements.
2935 *
2936 * LOCKING:
2937 * spin_lock_irqsave(host_set lock)
2938 */
2939
2940 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2941 unsigned int n_elem)
2942 {
2943 qc->flags |= ATA_QCFLAG_SG;
2944 qc->__sg = sg;
2945 qc->n_elem = n_elem;
2946 qc->orig_n_elem = n_elem;
2947 }
2948
2949 /**
2950 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2951 * @qc: Command with memory buffer to be mapped.
2952 *
2953 * DMA-map the memory buffer associated with queued_cmd @qc.
2954 *
2955 * LOCKING:
2956 * spin_lock_irqsave(host_set lock)
2957 *
2958 * RETURNS:
2959 * Zero on success, negative on error.
2960 */
2961
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands are expected to need odd-length padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-fill the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* the entire transfer fits in the pad buffer; nothing to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3017
3018 /**
3019 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3020 * @qc: Command with scatter-gather table to be mapped.
3021 *
3022 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3023 *
3024 * LOCKING:
3025 * spin_lock_irqsave(host_set lock)
3026 *
3027 * RETURNS:
3028 * Zero on success, negative on error.
3029 *
3030 */
3031
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands are expected to need odd-length padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, pre-fill the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	/* everything moved into the pad buffer; nothing left to map */
	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3102
3103 /**
3104 * ata_poll_qc_complete - turn irq back on and finish qc
3105 * @qc: Command to complete
3106 * @err_mask: ATA status register content
3107 *
3108 * LOCKING:
3109 * None. (grabs host lock)
3110 */
3111
void ata_poll_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	/* take the host_set lock so completion can't race the irq handler */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_NOINTR;	/* polling phase is over */
	ata_irq_on(ap);			/* re-enable the port interrupt */
	ata_qc_complete(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
3123
3124 /**
3125 * ata_pio_poll - poll using PIO, depending on current state
3126 * @ap: the target ata_port
3127 *
3128 * LOCKING:
3129 * None. (executing in kernel thread context)
3130 *
3131 * RETURNS:
3132 * timeout value to use
3133 */
3134
static unsigned long ata_pio_poll(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;
	unsigned int poll_state = HSM_ST_UNKNOWN;
	unsigned int reg_state = HSM_ST_UNKNOWN;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* pick the polling/regular state pair for the current HSM phase */
	switch (ap->hsm_task_state) {
	case HSM_ST:
	case HSM_ST_POLL:
		poll_state = HSM_ST_POLL;
		reg_state = HSM_ST;
		break;
	case HSM_ST_LAST:
	case HSM_ST_LAST_POLL:
		poll_state = HSM_ST_LAST_POLL;
		reg_state = HSM_ST_LAST;
		break;
	default:
		BUG();
		break;
	}

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		/* device still busy: fail on timeout, else keep polling */
		if (time_after(jiffies, ap->pio_task_timeout)) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ap->hsm_task_state = HSM_ST_TMOUT;
			return 0;
		}
		ap->hsm_task_state = poll_state;
		return ATA_SHORT_PAUSE;
	}

	/* BSY cleared: fall back to the non-polling variant of this phase */
	ap->hsm_task_state = reg_state;
	return 0;
}
3175
3176 /**
3177 * ata_pio_complete - check if drive is busy or idle
3178 * @ap: the target ata_port
3179 *
3180 * LOCKING:
3181 * None. (executing in kernel thread context)
3182 *
3183 * RETURNS:
3184 * Non-zero if qc completed, zero otherwise.
3185 */
3186
static int ata_pio_complete (struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 drv_stat;

	/*
	 * This is purely heuristic. This is a fast path. Sometimes when
	 * we enter, BSY will be cleared in a chk-status or two. If not,
	 * the drive is probably seeking or something. Snooze for a couple
	 * msecs, then chk-status again. If still busy, fall back to
	 * HSM_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
	if (drv_stat & ATA_BUSY) {
		msleep(2);
		drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
		if (drv_stat & ATA_BUSY) {
			/* still busy: switch to polling and retry later */
			ap->hsm_task_state = HSM_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return 0;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		/* device reported an error; record it and enter error state */
		qc->err_mask |= __ac_err_mask(drv_stat);
		ap->hsm_task_state = HSM_ST_ERR;
		return 0;
	}

	ap->hsm_task_state = HSM_ST_IDLE;

	WARN_ON(qc->err_mask);
	ata_poll_qc_complete(qc);

	/* another command may start at this point */

	return 1;
}
3229
3230
3231 /**
3232 * swap_buf_le16 - swap halves of 16-bit words in place
3233 * @buf: Buffer to swap
3234 * @buf_words: Number of 16-bit words in buffer.
3235 *
3236 * Swap halves of 16-bit words if needed to convert from
3237 * little-endian byte order to native cpu byte order, or
3238 * vice-versa.
3239 *
3240 * LOCKING:
3241 * Inherited from caller.
3242 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	/* big-endian CPUs must byte-swap each 16-bit word; on
	 * little-endian builds this function compiles to nothing
	 */
	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
3252
3253 /**
3254 * ata_mmio_data_xfer - Transfer data by MMIO
3255 * @ap: port to read/write
3256 * @buf: data buffer
3257 * @buflen: buffer length
3258 * @write_data: read/write
3259 *
3260 * Transfer data from/to the device data register by MMIO.
3261 *
3262 * LOCKING:
3263 * Inherited from caller.
3264 */
3265
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
{
	unsigned int i;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	/* Transfer multiple of 2 bytes */
	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* widen the final byte to a zero-padded 16-bit write */
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), mmio);
		} else {
			/* read a full word but keep only the first byte */
			align_buf[0] = cpu_to_le16(readw(mmio));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3297
3298 /**
3299 * ata_pio_data_xfer - Transfer data by PIO
3300 * @ap: port to read/write
3301 * @buf: data buffer
3302 * @buflen: buffer length
3303 * @write_data: read/write
3304 *
3305 * Transfer data from/to the device data register by PIO.
3306 *
3307 * LOCKING:
3308 * Inherited from caller.
3309 */
3310
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
			      unsigned int buflen, int write_data)
{
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		outsw(ap->ioaddr.data_addr, buf, words);
	else
		insw(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* widen the final byte to a zero-padded 16-bit write */
			memcpy(align_buf, trailing_buf, 1);
			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			/* read a full word but keep only the first byte */
			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3336
3337 /**
3338 * ata_data_xfer - Transfer data from/to the data register.
3339 * @ap: port to read/write
3340 * @buf: data buffer
3341 * @buflen: buffer length
3342 * @do_write: read/write
3343 *
3344 * Transfer data from/to the device data register.
3345 *
3346 * LOCKING:
3347 * Inherited from caller.
3348 */
3349
3350 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3351 unsigned int buflen, int do_write)
3352 {
3353 /* Make the crap hardware pay the costs not the good stuff */
3354 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3355 unsigned long flags;
3356 local_irq_save(flags);
3357 if (ap->flags & ATA_FLAG_MMIO)
3358 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3359 else
3360 ata_pio_data_xfer(ap, buf, buflen, do_write);
3361 local_irq_restore(flags);
3362 } else {
3363 if (ap->flags & ATA_FLAG_MMIO)
3364 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3365 else
3366 ata_pio_data_xfer(ap, buf, buflen, do_write);
3367 }
3368 }
3369
3370 /**
3371 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3372 * @qc: Command on going
3373 *
3374 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3375 *
3376 * LOCKING:
3377 * Inherited from caller.
3378 */
3379
3380 static void ata_pio_sector(struct ata_queued_cmd *qc)
3381 {
3382 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3383 struct scatterlist *sg = qc->__sg;
3384 struct ata_port *ap = qc->ap;
3385 struct page *page;
3386 unsigned int offset;
3387 unsigned char *buf;
3388
3389 if (qc->cursect == (qc->nsect - 1))
3390 ap->hsm_task_state = HSM_ST_LAST;
3391
3392 page = sg[qc->cursg].page;
3393 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3394
3395 /* get the current page and offset */
3396 page = nth_page(page, (offset >> PAGE_SHIFT));
3397 offset %= PAGE_SIZE;
3398
3399 buf = kmap(page) + offset;
3400
3401 qc->cursect++;
3402 qc->cursg_ofs++;
3403
3404 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3405 qc->cursg++;
3406 qc->cursg_ofs = 0;
3407 }
3408
3409 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3410
3411 /* do the actual data transfer */
3412 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3413 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3414
3415 kunmap(page);
3416 }
3417
3418 /**
3419 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3420 * @qc: Command on going
3421 * @bytes: number of bytes
3422 *
3423 * Transfer Transfer data from/to the ATAPI device.
3424 *
3425 * LOCKING:
3426 * Inherited from caller.
3427 *
3428 */
3429
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk satisfies the whole request; next state is completion */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
			       ap->id, bytes);

		for (i = 0; i < words; i++)
			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	buf = kmap(page) + offset;

	/* book-keeping before the transfer so state stays consistent */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* advance to the next sg entry once this one is consumed */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);

	kunmap(page);

	/* keep going until the device's requested byte count is exhausted */
	if (bytes)
		goto next_sg;
}
3502
3503 /**
3504 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3505 * @qc: Command on going
3506 *
3507 * Transfer Transfer data from/to the ATAPI device.
3508 *
3509 * LOCKING:
3510 * Inherited from caller.
3511 */
3512
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* read back the taskfile: nsect carries the ATAPI interrupt reason,
	 * lbam/lbah carry the byte count the device wants transferred
	 */
	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* protocol violation by the device: flag an HSM error so the
	 * state machine fails the command
	 */
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
3545
3546 /**
3547 * ata_pio_block - start PIO on a block
3548 * @ap: the target ata_port
3549 *
3550 * LOCKING:
3551 * None. (executing in kernel thread context)
3552 */
3553
static void ata_pio_block(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * HSM_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: drop to slow polling and arm the
			 * PIO timeout deadline
			 */
			ap->hsm_task_state = HSM_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* check error */
	if (status & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		ap->hsm_task_state = HSM_ST_ERR;
		return;
	}

	/* transfer data if any */
	if (is_atapi_taskfile(&qc->tf)) {
		/* DRQ=0 means no more data to transfer */
		if ((status & ATA_DRQ) == 0) {
			ap->hsm_task_state = HSM_ST_LAST;
			return;
		}

		atapi_pio_bytes(qc);
	} else {
		/* handle BSY=0, DRQ=0 as error */
		if ((status & ATA_DRQ) == 0) {
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			return;
		}

		ata_pio_sector(qc);
	}
}
3608
3609 static void ata_pio_error(struct ata_port *ap)
3610 {
3611 struct ata_queued_cmd *qc;
3612
3613 qc = ata_qc_from_tag(ap, ap->active_tag);
3614 WARN_ON(qc == NULL);
3615
3616 if (qc->tf.command != ATA_CMD_PACKET)
3617 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3618
3619 /* make sure qc->err_mask is available to
3620 * know what's wrong and recover
3621 */
3622 WARN_ON(qc->err_mask == 0);
3623
3624 ap->hsm_task_state = HSM_ST_IDLE;
3625
3626 ata_poll_qc_complete(qc);
3627 }
3628
/* Worker function driving the PIO host state machine.  Re-queues itself
 * (possibly with a delay) until the command completes or errors out.
 */
static void ata_pio_task(void *_data)
{
	struct ata_port *ap = _data;
	unsigned long timeout;
	int qc_completed;

fsm_start:
	timeout = 0;
	qc_completed = 0;

	/* dispatch on the current HSM state */
	switch (ap->hsm_task_state) {
	case HSM_ST_IDLE:
		return;

	case HSM_ST:
		ata_pio_block(ap);
		break;

	case HSM_ST_LAST:
		qc_completed = ata_pio_complete(ap);
		break;

	case HSM_ST_POLL:
	case HSM_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);
		break;

	case HSM_ST_TMOUT:
	case HSM_ST_ERR:
		ata_pio_error(ap);
		return;
	}

	/* device still busy: reschedule after a delay; otherwise keep
	 * stepping the state machine until the qc completes
	 */
	if (timeout)
		ata_port_queue_task(ap, ata_pio_task, ap, timeout);
	else if (!qc_completed)
		goto fsm_start;
}
3667
3668 /**
3669 * atapi_packet_task - Write CDB bytes to hardware
3670 * @_data: Port to which ATAPI device is attached.
3671 *
3672 * When device has indicated its readiness to accept
3673 * a CDB, this function is called. Send the CDB.
3674 * If DMA is to be performed, exit immediately.
3675 * Otherwise, we are in polling mode, so poll
3676 * status under operation succeeds or fails.
3677 *
3678 * LOCKING:
3679 * Kernel thread context (may sleep)
3680 */
3681
static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		goto err_out;
	}

	/* make sure DRQ is set: device must be requesting the CDB now */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
		qc->err_mask |= AC_ERR_HSM;
		goto err_out;
	}

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		unsigned long flags;

		/* Once we're done issuing command and kicking bmdma,
		 * irq handler takes over.  To not lose irq, we need
		 * to clear NOINTR flag before sending cdb, but
		 * interrupt handler shouldn't be invoked before we're
		 * finished.  Hence, the following locking.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		ap->flags &= ~ATA_FLAG_NOINTR;
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
			ap->ops->bmdma_start(qc);	/* initiate bmdma */
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	} else {
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);

		/* PIO commands are handled by polling */
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
	}

	return;

err_out:
	/* qc->err_mask has been set above; finish the command */
	ata_poll_qc_complete(qc);
}
3739
3740 /**
3741 * ata_qc_timeout - Handle timeout of queued command
3742 * @qc: Command that timed out
3743 *
3744 * Some part of the kernel (currently, only the SCSI layer)
3745 * has noticed that the active command on port @ap has not
3746 * completed after a specified length of time. Handle this
3747 * condition by disabling DMA (if necessary) and completing
3748 * transactions, with error if necessary.
3749 *
3750 * This also handles the case of the "lost interrupt", where
3751 * for some reason (possibly hardware bug, possibly driver bug)
3752 * an interrupt was not delivered to the driver, even though the
3753 * transaction completed successfully.
3754 *
3755 * LOCKING:
3756 * Inherited from SCSI layer (none, can sleep)
3757 */
3758
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* stop any in-flight PIO state machine activity */
	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(&host_set->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		/* sample BMDMA status before touching the engine */
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
3804
3805 /**
3806 * ata_eng_timeout - Handle timeout of queued command
3807 * @ap: Port on which timed-out command is active
3808 *
3809 * Some part of the kernel (currently, only the SCSI layer)
3810 * has noticed that the active command on port @ap has not
3811 * completed after a specified length of time. Handle this
3812 * condition by disabling DMA (if necessary) and completing
3813 * transactions, with error if necessary.
3814 *
3815 * This also handles the case of the "lost interrupt", where
3816 * for some reason (possibly hardware bug, possibly driver bug)
3817 * an interrupt was not delivered to the driver, even though the
3818 * transaction completed successfully.
3819 *
3820 * LOCKING:
3821 * Inherited from SCSI layer (none, can sleep)
3822 */
3823
3824 void ata_eng_timeout(struct ata_port *ap)
3825 {
3826 DPRINTK("ENTER\n");
3827
3828 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3829
3830 DPRINTK("EXIT\n");
3831 }
3832
3833 /**
3834 * ata_qc_new - Request an available ATA command, for queueing
3835 * @ap: Port associated with device @dev
3836 * @dev: Device from whom we request an available command structure
3837 *
3838 * LOCKING:
3839 * None.
3840 */
3841
3842 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3843 {
3844 struct ata_queued_cmd *qc = NULL;
3845 unsigned int i;
3846
3847 for (i = 0; i < ATA_MAX_QUEUE; i++)
3848 if (!test_and_set_bit(i, &ap->qactive)) {
3849 qc = ata_qc_from_tag(ap, i);
3850 break;
3851 }
3852
3853 if (qc)
3854 qc->tag = i;
3855
3856 return qc;
3857 }
3858
3859 /**
3860 * ata_qc_new_init - Request an available ATA command, and initialize it
3861 * @ap: Port associated with device @dev
3862 * @dev: Device from whom we request an available command structure
3863 *
3864 * LOCKING:
3865 * None.
3866 */
3867
3868 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3869 struct ata_device *dev)
3870 {
3871 struct ata_queued_cmd *qc;
3872
3873 qc = ata_qc_new(ap);
3874 if (qc) {
3875 qc->scsicmd = NULL;
3876 qc->ap = ap;
3877 qc->dev = dev;
3878
3879 ata_qc_reinit(qc);
3880 }
3881
3882 return qc;
3883 }
3884
3885 /**
3886 * ata_qc_free - free unused ata_queued_cmd
3887 * @qc: Command to complete
3888 *
3889 * Designed to free unused ata_queued_cmd object
3890 * in case something prevents using it.
3891 *
3892 * LOCKING:
3893 * spin_lock_irqsave(host_set lock)
3894 */
3895 void ata_qc_free(struct ata_queued_cmd *qc)
3896 {
3897 struct ata_port *ap = qc->ap;
3898 unsigned int tag;
3899
3900 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3901
3902 qc->flags = 0;
3903 tag = qc->tag;
3904 if (likely(ata_tag_valid(tag))) {
3905 if (tag == ap->active_tag)
3906 ap->active_tag = ATA_TAG_POISON;
3907 qc->tag = ATA_TAG_POISON;
3908 clear_bit(tag, &ap->qactive);
3909 }
3910 }
3911
/* Core completion path: unmap DMA, mark the qc inactive, then invoke
 * the completion callback.  Caller holds the host_set lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;

	/* call completion callback */
	qc->complete_fn(qc);
}
3929
3930 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3931 {
3932 struct ata_port *ap = qc->ap;
3933
3934 switch (qc->tf.protocol) {
3935 case ATA_PROT_DMA:
3936 case ATA_PROT_ATAPI_DMA:
3937 return 1;
3938
3939 case ATA_PROT_ATAPI:
3940 case ATA_PROT_PIO:
3941 if (ap->flags & ATA_FLAG_PIO_DMA)
3942 return 1;
3943
3944 /* fall through */
3945
3946 default:
3947 return 0;
3948 }
3949
3950 /* never reached */
3951 }
3952
3953 /**
3954 * ata_qc_issue - issue taskfile to device
3955 * @qc: command to issue to device
3956 *
3957 * Prepare an ATA command to submission to device.
3958 * This includes mapping the data into a DMA-able
3959 * area, filling in the S/G table, and finally
3960 * writing the taskfile to hardware, starting the command.
3961 *
3962 * LOCKING:
3963 * spin_lock_irqsave(host_set lock)
3964 *
3965 * RETURNS:
3966 * Zero on success, AC_ERR_* mask on failure
3967 */
3968
3969 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3970 {
3971 struct ata_port *ap = qc->ap;
3972
3973 if (ata_should_dma_map(qc)) {
3974 if (qc->flags & ATA_QCFLAG_SG) {
3975 if (ata_sg_setup(qc))
3976 goto sg_err;
3977 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3978 if (ata_sg_setup_one(qc))
3979 goto sg_err;
3980 }
3981 } else {
3982 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3983 }
3984
3985 ap->ops->qc_prep(qc);
3986
3987 qc->ap->active_tag = qc->tag;
3988 qc->flags |= ATA_QCFLAG_ACTIVE;
3989
3990 return ap->ops->qc_issue(qc);
3991
3992 sg_err:
3993 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3994 return AC_ERR_SYSTEM;
3995 }
3996
3997
3998 /**
3999 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4000 * @qc: command to issue to device
4001 *
4002 * Using various libata functions and hooks, this function
4003 * starts an ATA command. ATA commands are grouped into
4004 * classes called "protocols", and issuing each type of protocol
4005 * is slightly different.
4006 *
4007 * May be used as the qc_issue() entry in ata_port_operations.
4008 *
4009 * LOCKING:
4010 * spin_lock_irqsave(host_set lock)
4011 *
4012 * RETURNS:
4013 * Zero on success, AC_ERR_* mask on failure
4014 */
4015
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* select the target device on the channel */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_tf_to_host(ap, &qc->tf);
		break;

	case ATA_PROT_DMA:
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		break;

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
		break;

	case ATA_PROT_ATAPI:
		/* polled ATAPI: CDB is sent later by atapi_packet_task */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_NODATA:
		/* suppress interrupts until the CDB has been written */
		ap->flags |= ATA_FLAG_NOINTR;
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		/* suppress interrupts until the CDB has been written;
		 * bmdma is started by atapi_packet_task after the CDB
		 */
		ap->flags |= ATA_FLAG_NOINTR;
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
4066
4067 /**
4068 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
4069 * @qc: Info associated with this ATA transaction.
4070 *
4071 * LOCKING:
4072 * spin_lock_irqsave(host_set lock)
4073 */
4074
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	/* ATA_DMA_WR means "controller writes to memory", i.e. a read
	 * from the device's point of view — hence the inversion
	 */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
4096
4097 /**
4098 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
4099 * @qc: Info associated with this ATA transaction.
4100 *
4101 * LOCKING:
4102 * spin_lock_irqsave(host_set lock)
4103 */
4104
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction: read-modify-write the start bit */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expected, so I think it is best to not add a readb()
	 * without first all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}
4127
4128 /**
4129 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
4130 * @qc: Info associated with this ATA transaction.
4131 *
4132 * LOCKING:
4133 * spin_lock_irqsave(host_set lock)
4134 */
4135
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	/* ATA_DMA_WR = controller writes to memory = device read */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
4155
4156 /**
4157 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4158 * @qc: Info associated with this ATA transaction.
4159 *
4160 * LOCKING:
4161 * spin_lock_irqsave(host_set lock)
4162 */
4163
4164 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4165 {
4166 struct ata_port *ap = qc->ap;
4167 u8 dmactl;
4168
4169 /* start host DMA transaction */
4170 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4171 outb(dmactl | ATA_DMA_START,
4172 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4173 }
4174
4175
4176 /**
4177 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4178 * @qc: Info associated with this ATA transaction.
4179 *
4180 * Writes the ATA_DMA_START flag to the DMA command register.
4181 *
4182 * May be used as the bmdma_start() entry in ata_port_operations.
4183 *
4184 * LOCKING:
4185 * spin_lock_irqsave(host_set lock)
4186 */
4187 void ata_bmdma_start(struct ata_queued_cmd *qc)
4188 {
4189 if (qc->ap->flags & ATA_FLAG_MMIO)
4190 ata_bmdma_start_mmio(qc);
4191 else
4192 ata_bmdma_start_pio(qc);
4193 }
4194
4195
4196 /**
4197 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4198 * @qc: Info associated with this ATA transaction.
4199 *
4200 * Writes address of PRD table to device's PRD Table Address
4201 * register, sets the DMA control register, and calls
4202 * ops->exec_command() to start the transfer.
4203 *
4204 * May be used as the bmdma_setup() entry in ata_port_operations.
4205 *
4206 * LOCKING:
4207 * spin_lock_irqsave(host_set lock)
4208 */
4209 void ata_bmdma_setup(struct ata_queued_cmd *qc)
4210 {
4211 if (qc->ap->flags & ATA_FLAG_MMIO)
4212 ata_bmdma_setup_mmio(qc);
4213 else
4214 ata_bmdma_setup_pio(qc);
4215 }
4216
4217
4218 /**
4219 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
4220 * @ap: Port associated with this ATA transaction.
4221 *
4222 * Clear interrupt and error flags in DMA status register.
4223 *
4224 * May be used as the irq_clear() entry in ata_port_operations.
4225 *
4226 * LOCKING:
4227 * spin_lock_irqsave(host_set lock)
4228 */
4229
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	/* nothing to do if the port has no BMDMA registers */
	if (!ap->ioaddr.bmdma_addr)
		return;

	/* the interrupt/error bits are write-1-to-clear, so reading the
	 * status register and writing the value back clears them
	 */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio =
		      ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}
}
4244
4245
4246 /**
4247 * ata_bmdma_status - Read PCI IDE BMDMA status
4248 * @ap: Port associated with this ATA transaction.
4249 *
4250 * Read and return BMDMA status register.
4251 *
4252 * May be used as the bmdma_status() entry in ata_port_operations.
4253 *
4254 * LOCKING:
4255 * spin_lock_irqsave(host_set lock)
4256 */
4257
4258 u8 ata_bmdma_status(struct ata_port *ap)
4259 {
4260 u8 host_stat;
4261 if (ap->flags & ATA_FLAG_MMIO) {
4262 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4263 host_stat = readb(mmio + ATA_DMA_STATUS);
4264 } else
4265 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4266 return host_stat;
4267 }
4268
4269
4270 /**
4271 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
4272 * @qc: Command we are ending DMA for
4273 *
4274 * Clears the ATA_DMA_START flag in the dma control register
4275 *
4276 * May be used as the bmdma_stop() entry in ata_port_operations.
4277 *
4278 * LOCKING:
4279 * spin_lock_irqsave(host_set lock)
4280 */
4281
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	 /* dummy read */
}
4300
4301 /**
4302 * ata_host_intr - Handle host interrupt for given (port, task)
4303 * @ap: Port on which interrupt arrived (possibly...)
4304 * @qc: Taskfile currently active in engine
4305 *
4306 * Handle host interrupt for given queued command. Currently,
4307 * only DMA interrupts are handled. All other commands are
4308 * handled via polling with interrupts disabled (nIEN bit).
4309 *
4310 * LOCKING:
4311 * spin_lock_irqsave(host_set lock)
4312 *
4313 * RETURNS:
4314 * One if interrupt was handled, zero if not (shared irq).
4315 */
4316
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	u8 status, host_stat;

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			goto idle_irq;

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus: reading it does NOT clear INTRQ, so a
		 * busy device here means the irq wasn't for us
		 */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)
			goto idle_irq;

		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
			goto idle_irq;
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		break;

	default:
		/* polled protocols never complete through the irq handler */
		goto idle_irq;
	}

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	/* debug aid: periodically ack spurious irqs to detect stuck lines */
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
4380
4381 /**
4382 * ata_interrupt - Default ATA host interrupt handler
4383 * @irq: irq line (unused)
4384 * @dev_instance: pointer to our ata_host_set information structure
4385 * @regs: unused
4386 *
4387 * Default interrupt handler for PCI IDE devices. Calls
4388 * ata_host_intr() for each port that is not disabled.
4389 *
4390 * LOCKING:
4391 * Obtains host_set lock during operation.
4392 *
4393 * RETURNS:
4394 * IRQ_NONE or IRQ_HANDLED.
4395 */
4396
irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	/* the irq line is shared by all ports of the host set; poll each
	 * enabled port for an active, interrupt-driven command
	 */
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;

			/* skip polled commands (nIEN set) and idle ports */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
4426
4427
4428 /*
4429 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4430 * without filling any other registers
4431 */
4432 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4433 u8 cmd)
4434 {
4435 struct ata_taskfile tf;
4436 int err;
4437
4438 ata_tf_init(ap, &tf, dev->devno);
4439
4440 tf.command = cmd;
4441 tf.flags |= ATA_TFLAG_DEVICE;
4442 tf.protocol = ATA_PROT_NODATA;
4443
4444 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4445 if (err)
4446 printk(KERN_ERR "%s: ata command failed: %d\n",
4447 __FUNCTION__, err);
4448
4449 return err;
4450 }
4451
4452 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4453 {
4454 u8 cmd;
4455
4456 if (!ata_try_flush_cache(dev))
4457 return 0;
4458
4459 if (ata_id_has_flush_ext(dev->id))
4460 cmd = ATA_CMD_FLUSH_EXT;
4461 else
4462 cmd = ATA_CMD_FLUSH;
4463
4464 return ata_do_simple_cmd(ap, dev, cmd);
4465 }
4466
/* Put the drive into standby (spun down) via STANDBY IMMEDIATE. */
static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
}
4471
/* Wake the drive back up via IDLE IMMEDIATE. */
static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
}
4476
4477 /**
4478 * ata_device_resume - wakeup a previously suspended devices
4479 * @ap: port the device is connected to
4480 * @dev: the device to resume
4481 *
4482 * Kick the drive back into action, by sending it an idle immediate
4483 * command and making sure its transfer mode matches between drive
4484 * and host.
4485 *
4486 */
4487 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4488 {
4489 if (ap->flags & ATA_FLAG_SUSPENDED) {
4490 ap->flags &= ~ATA_FLAG_SUSPENDED;
4491 ata_set_mode(ap);
4492 }
4493 if (!ata_dev_present(dev))
4494 return 0;
4495 if (dev->class == ATA_DEV_ATA)
4496 ata_start_drive(ap, dev);
4497
4498 return 0;
4499 }
4500
4501 /**
4502 * ata_device_suspend - prepare a device for suspend
4503 * @ap: port the device is connected to
4504 * @dev: the device to suspend
4505 *
4506 * Flush the cache on the drive, if appropriate, then issue a
4507 * standbynow command.
4508 */
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev))
		return 0;
	/* flush the write cache before spinning down (ATA disks only) */
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(ap, dev);

	ata_standby_drive(ap, dev);
	/* remember to re-tune modes on resume */
	ap->flags |= ATA_FLAG_SUSPENDED;
	return 0;
}
4520
4521 /**
4522 * ata_port_start - Set port up for dma.
4523 * @ap: Port to initialize
4524 *
4525 * Called just after data structures for each port are
4526 * initialized. Allocates space for PRD table.
4527 *
4528 * May be used as the port_start() entry in ata_port_operations.
4529 *
4530 * LOCKING:
4531 * Inherited from caller.
4532 */
4533
4534 int ata_port_start (struct ata_port *ap)
4535 {
4536 struct device *dev = ap->host_set->dev;
4537 int rc;
4538
4539 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4540 if (!ap->prd)
4541 return -ENOMEM;
4542
4543 rc = ata_pad_alloc(ap, dev);
4544 if (rc) {
4545 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4546 return rc;
4547 }
4548
4549 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4550
4551 return 0;
4552 }
4553
4554
4555 /**
4556 * ata_port_stop - Undo ata_port_start()
4557 * @ap: Port to shut down
4558 *
4559 * Frees the PRD table.
4560 *
4561 * May be used as the port_stop() entry in ata_port_operations.
4562 *
4563 * LOCKING:
4564 * Inherited from caller.
4565 */
4566
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	/* release resources allocated by ata_port_start() */
	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
4574
/* Host-set teardown: unmap the shared MMIO region, if one was mapped. */
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
4580
4581
4582 /**
4583 * ata_host_remove - Unregister SCSI host structure with upper layers
4584 * @ap: Port to unregister
4585 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4586 *
4587 * LOCKING:
4588 * Inherited from caller.
4589 */
4590
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	/* detach from the SCSI midlayer first when fully unregistering */
	if (do_unregister)
		scsi_remove_host(sh);

	/* then release per-port resources via the LLD hook */
	ap->ops->port_stop(ap);
}
4602
4603 /**
4604 * ata_host_init - Initialize an ata_port structure
4605 * @ap: Structure to initialize
4606 * @host: associated SCSI mid-layer structure
4607 * @host_set: Collection of hosts to which @ap belongs
4608 * @ent: Probe information provided by low-level driver
4609 * @port_no: Port number associated with this ata_port
4610 *
4611 * Initialize a new ata_port structure, and its associated
4612 * scsi_host.
4613 *
4614 * LOCKING:
4615 * Inherited from caller.
4616 */
4617
4618 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4619 struct ata_host_set *host_set,
4620 const struct ata_probe_ent *ent, unsigned int port_no)
4621 {
4622 unsigned int i;
4623
4624 host->max_id = 16;
4625 host->max_lun = 1;
4626 host->max_channel = 1;
4627 host->unique_id = ata_unique_id++;
4628 host->max_cmd_len = 12;
4629
4630 ap->flags = ATA_FLAG_PORT_DISABLED;
4631 ap->id = host->unique_id;
4632 ap->host = host;
4633 ap->ctl = ATA_DEVCTL_OBS;
4634 ap->host_set = host_set;
4635 ap->port_no = port_no;
4636 ap->hard_port_no =
4637 ent->legacy_mode ? ent->hard_port_no : port_no;
4638 ap->pio_mask = ent->pio_mask;
4639 ap->mwdma_mask = ent->mwdma_mask;
4640 ap->udma_mask = ent->udma_mask;
4641 ap->flags |= ent->host_flags;
4642 ap->ops = ent->port_ops;
4643 ap->cbl = ATA_CBL_NONE;
4644 ap->active_tag = ATA_TAG_POISON;
4645 ap->last_ctl = 0xFF;
4646
4647 INIT_WORK(&ap->port_task, NULL, NULL);
4648 INIT_LIST_HEAD(&ap->eh_done_q);
4649
4650 for (i = 0; i < ATA_MAX_DEVICES; i++)
4651 ap->device[i].devno = i;
4652
4653 #ifdef ATA_IRQ_TRAP
4654 ap->stats.unhandled_irq = 1;
4655 ap->stats.idle_irq = 1;
4656 #endif
4657
4658 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4659 }
4660
4661 /**
4662 * ata_host_add - Attach low-level ATA driver to system
4663 * @ent: Information provided by low-level driver
4664 * @host_set: Collections of ports to which we add
4665 * @port_no: Port number associated with this host
4666 *
4667 * Attach low-level ATA driver to system.
4668 *
4669 * LOCKING:
4670 * PCI/etc. bus probe sem.
4671 *
4672 * RETURNS:
4673 * New ata_port on success, for NULL on error.
4674 */
4675
4676 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4677 struct ata_host_set *host_set,
4678 unsigned int port_no)
4679 {
4680 struct Scsi_Host *host;
4681 struct ata_port *ap;
4682 int rc;
4683
4684 DPRINTK("ENTER\n");
4685 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4686 if (!host)
4687 return NULL;
4688
4689 host->transportt = &ata_scsi_transport_template;
4690
4691 ap = (struct ata_port *) &host->hostdata[0];
4692
4693 ata_host_init(ap, host, host_set, ent, port_no);
4694
4695 rc = ap->ops->port_start(ap);
4696 if (rc)
4697 goto err_out;
4698
4699 return ap;
4700
4701 err_out:
4702 scsi_host_put(host);
4703 return NULL;
4704 }
4705
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses);
	 * the trailing ports[] array gets one pointer slot per port
	 */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		/* combined PIO/MWDMA/UDMA mask, used only for the
		 * human-readable mode string below
		 */
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		/* read status and clear controller interrupt state
		 * before the shared handler is registered below
		 */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: bus probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	/* stash host_set so ata_pci_remove_one() et al can find it */
	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	/* tear down only the ports that were successfully added */
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
4840
/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	/* first detach every port from the SCSI midlayer, while the
	 * shared interrupt handler is still registered
	 */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	/* the irq is shared between all channels (see ata_device_add()) */
	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		/* give back the legacy ISA command-block regions for
		 * ports at the standard primary/secondary addresses
		 * (presumably claimed during probe — not visible here)
		 */
		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		/* final reference; frees the Scsi_Host and ata_port */
		scsi_host_put(ap->host);
	}

	/* optional LLD hook, e.g. ata_host_stop()/ata_pci_host_stop() */
	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
4886
4887 /**
4888 * ata_scsi_release - SCSI layer callback hook for host unload
4889 * @host: libata host to be unloaded
4890 *
4891 * Performs all duties necessary to shut down a libata port...
4892 * Kill port kthread, disable port, and release resources.
4893 *
4894 * LOCKING:
4895 * Inherited from SCSI layer.
4896 *
4897 * RETURNS:
4898 * One.
4899 */
4900
4901 int ata_scsi_release(struct Scsi_Host *host)
4902 {
4903 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4904 int i;
4905
4906 DPRINTK("ENTER\n");
4907
4908 ap->ops->port_disable(ap);
4909 ata_host_remove(ap, 0);
4910 for (i = 0; i < ATA_MAX_DEVICES; i++)
4911 kfree(ap->device[i].id);
4912
4913 DPRINTK("EXIT\n");
4914 return 1;
4915 }
4916
4917 /**
4918 * ata_std_ports - initialize ioaddr with standard port offsets.
4919 * @ioaddr: IO address structure to be initialized
4920 *
4921 * Utility function which initializes data_addr, error_addr,
4922 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4923 * device_addr, status_addr, and command_addr to standard offsets
4924 * relative to cmd_addr.
4925 *
4926 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4927 */
4928
4929 void ata_std_ports(struct ata_ioports *ioaddr)
4930 {
4931 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4932 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4933 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4934 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4935 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4936 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4937 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4938 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4939 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4940 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4941 }
4942
4943
4944 #ifdef CONFIG_PCI
4945
/**
 *	ata_pci_host_stop - Unmap a PCI host set's MMIO base
 *	@host_set: host set to stop
 *
 *	pci_iounmap() counterpart of ata_host_stop() for MMIO regions
 *	mapped through the PCI layer; usable as a ->host_stop() hook.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
4952
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	/* drvdata was stashed by ata_device_add() */
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	/* tear down ports and free the host set first ... */
	ata_host_set_remove(host_set);
	/* ... then release PCI regions and disable the function */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
4977
4978 /* move to PCI subsystem */
4979 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4980 {
4981 unsigned long tmp = 0;
4982
4983 switch (bits->width) {
4984 case 1: {
4985 u8 tmp8 = 0;
4986 pci_read_config_byte(pdev, bits->reg, &tmp8);
4987 tmp = tmp8;
4988 break;
4989 }
4990 case 2: {
4991 u16 tmp16 = 0;
4992 pci_read_config_word(pdev, bits->reg, &tmp16);
4993 tmp = tmp16;
4994 break;
4995 }
4996 case 4: {
4997 u32 tmp32 = 0;
4998 pci_read_config_dword(pdev, bits->reg, &tmp32);
4999 tmp = tmp32;
5000 break;
5001 }
5002
5003 default:
5004 return -EINVAL;
5005 }
5006
5007 tmp &= bits->mask;
5008
5009 return (tmp == bits->val) ? 1 : 0;
5010 }
5011
/**
 *	ata_pci_device_suspend - PCI suspend helper for libata drivers
 *	@pdev: PCI device being suspended
 *	@state: target system state (unused; D3hot is always entered)
 *
 *	Saves PCI config space, disables the device, and puts it
 *	into D3hot.
 *
 *	RETURNS:
 *	Zero.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
5019
/**
 *	ata_pci_device_resume - PCI resume helper for libata drivers
 *	@pdev: PCI device being resumed
 *
 *	Restores power state and config space, re-enables the device
 *	and restores bus mastering — the inverse of
 *	ata_pci_device_suspend().
 *
 *	RETURNS:
 *	Zero.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* NOTE(review): pci_enable_device() return value is ignored;
	 * resume proceeds even if enabling failed — confirm intent */
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
5028 #endif /* CONFIG_PCI */
5029
5030
/* Module init: create the "ata" workqueue used for deferred port
 * work (cf. the ata_port_queue_task export), then announce the
 * driver version.  Returns 0 on success, -ENOMEM otherwise.
 */
static int __init ata_init(void)
{
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
5040
/* Module exit: flush and destroy the workqueue created in ata_init() */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}
5045
5046 module_init(ata_init);
5047 module_exit(ata_exit);
5048
/* state for ata_ratelimit(): end of the current throttle window,
 * guarded by its own lock since it is updated from any context */
static unsigned long ratelimit_time;
/* DEFINE_SPINLOCK replaces the deprecated SPIN_LOCK_UNLOCKED
 * static initializer (identical semantics, lockdep-friendly) */
static DEFINE_SPINLOCK(ata_ratelimit_lock);
5051
5052 int ata_ratelimit(void)
5053 {
5054 int rc;
5055 unsigned long flags;
5056
5057 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5058
5059 if (time_after(jiffies, ratelimit_time)) {
5060 rc = 1;
5061 ratelimit_time = jiffies + (HZ/5);
5062 } else
5063 rc = 0;
5064
5065 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5066
5067 return rc;
5068 }
5069
5070 /*
5071 * libata is essentially a library of internal helper functions for
5072 * low-level ATA host controller drivers. As such, the API/ABI is
5073 * likely to change as new drivers are added and updated.
5074 * Do not depend on ABI/API stability.
5075 */
5076
5077 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5078 EXPORT_SYMBOL_GPL(ata_std_ports);
5079 EXPORT_SYMBOL_GPL(ata_device_add);
5080 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5081 EXPORT_SYMBOL_GPL(ata_sg_init);
5082 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5083 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5084 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5085 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5086 EXPORT_SYMBOL_GPL(ata_tf_load);
5087 EXPORT_SYMBOL_GPL(ata_tf_read);
5088 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5089 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5090 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5091 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5092 EXPORT_SYMBOL_GPL(ata_check_status);
5093 EXPORT_SYMBOL_GPL(ata_altstatus);
5094 EXPORT_SYMBOL_GPL(ata_exec_command);
5095 EXPORT_SYMBOL_GPL(ata_port_start);
5096 EXPORT_SYMBOL_GPL(ata_port_stop);
5097 EXPORT_SYMBOL_GPL(ata_host_stop);
5098 EXPORT_SYMBOL_GPL(ata_interrupt);
5099 EXPORT_SYMBOL_GPL(ata_qc_prep);
5100 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5101 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5102 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5103 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5104 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5105 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5106 EXPORT_SYMBOL_GPL(ata_port_probe);
5107 EXPORT_SYMBOL_GPL(sata_phy_reset);
5108 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5109 EXPORT_SYMBOL_GPL(ata_bus_reset);
5110 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5111 EXPORT_SYMBOL_GPL(ata_std_softreset);
5112 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5113 EXPORT_SYMBOL_GPL(ata_std_postreset);
5114 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5115 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5116 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5117 EXPORT_SYMBOL_GPL(ata_port_disable);
5118 EXPORT_SYMBOL_GPL(ata_ratelimit);
5119 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5120 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5121 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5122 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5123 EXPORT_SYMBOL_GPL(ata_scsi_error);
5124 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5125 EXPORT_SYMBOL_GPL(ata_scsi_release);
5126 EXPORT_SYMBOL_GPL(ata_host_intr);
5127 EXPORT_SYMBOL_GPL(ata_dev_classify);
5128 EXPORT_SYMBOL_GPL(ata_id_string);
5129 EXPORT_SYMBOL_GPL(ata_id_c_string);
5130 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5131 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5132 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5133
5134 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5135 EXPORT_SYMBOL_GPL(ata_timing_compute);
5136 EXPORT_SYMBOL_GPL(ata_timing_merge);
5137
5138 #ifdef CONFIG_PCI
5139 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5140 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5141 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5142 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5143 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5144 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5145 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5146 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5147 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5148 #endif /* CONFIG_PCI */
5149
5150 EXPORT_SYMBOL_GPL(ata_device_suspend);
5151 EXPORT_SYMBOL_GPL(ata_device_resume);
5152 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5153 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
This page took 0.141688 seconds and 6 git commands to generate.