[PATCH] libata: implement ata_unpack_xfermask()
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_dev_xfermask(struct ata_port *ap,
69 struct ata_device *dev);
70
71 static unsigned int ata_unique_id = 1;
72 static struct workqueue_struct *ata_wq;
73
74 int atapi_enabled = 1;
75 module_param(atapi_enabled, int, 0444);
76 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
77
78 int libata_fua = 0;
79 module_param_named(fua, libata_fua, int, 0444);
80 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
81
82 MODULE_AUTHOR("Jeff Garzik");
83 MODULE_DESCRIPTION("Library module for ATA devices");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_VERSION);
86
87
88 /**
89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
90 * @tf: Taskfile to convert
91 * @fis: Buffer into which data will be output
92 * @pmp: Port multiplier port
93 *
94 * Converts a standard ATA taskfile to a Serial ATA
95 * FIS structure (Register - Host to Device).
96 *
97 * LOCKING:
98 * Inherited from caller.
99 */
100
101 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
102 {
103 fis[0] = 0x27; /* Register - Host to Device FIS */
104 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
105 bit 7 indicates Command FIS */
106 fis[2] = tf->command;
107 fis[3] = tf->feature;
108
109 fis[4] = tf->lbal;
110 fis[5] = tf->lbam;
111 fis[6] = tf->lbah;
112 fis[7] = tf->device;
113
114 fis[8] = tf->hob_lbal;
115 fis[9] = tf->hob_lbam;
116 fis[10] = tf->hob_lbah;
117 fis[11] = tf->hob_feature;
118
119 fis[12] = tf->nsect;
120 fis[13] = tf->hob_nsect;
121 fis[14] = 0;
122 fis[15] = tf->ctl;
123
124 fis[16] = 0;
125 fis[17] = 0;
126 fis[18] = 0;
127 fis[19] = 0;
128 }
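/*
 * Illustrative usage sketch (not part of the original file): a SATA
 * LLDD would typically build the FIS into its controller's command
 * buffer before issuing the command, along the lines of:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, fis, 0);	     (pmp 0: directly attached)
 *	memcpy(cmd_slot, fis, sizeof(fis));  (cmd_slot is hypothetical)
 */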
129
130 /**
131 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
132 * @fis: Buffer from which data will be input
133 * @tf: Taskfile to output
134 *
135 * Converts a serial ATA FIS structure to a standard ATA taskfile.
136 *
137 * LOCKING:
138 * Inherited from caller.
139 */
140
141 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
142 {
143 tf->command = fis[2]; /* status */
144 tf->feature = fis[3]; /* error */
145
146 tf->lbal = fis[4];
147 tf->lbam = fis[5];
148 tf->lbah = fis[6];
149 tf->device = fis[7];
150
151 tf->hob_lbal = fis[8];
152 tf->hob_lbam = fis[9];
153 tf->hob_lbah = fis[10];
154
155 tf->nsect = fis[12];
156 tf->hob_nsect = fis[13];
157 }
158
159 static const u8 ata_rw_cmds[] = {
160 /* pio multi */
161 ATA_CMD_READ_MULTI,
162 ATA_CMD_WRITE_MULTI,
163 ATA_CMD_READ_MULTI_EXT,
164 ATA_CMD_WRITE_MULTI_EXT,
165 0,
166 0,
167 0,
168 ATA_CMD_WRITE_MULTI_FUA_EXT,
169 /* pio */
170 ATA_CMD_PIO_READ,
171 ATA_CMD_PIO_WRITE,
172 ATA_CMD_PIO_READ_EXT,
173 ATA_CMD_PIO_WRITE_EXT,
174 0,
175 0,
176 0,
177 0,
178 /* dma */
179 ATA_CMD_READ,
180 ATA_CMD_WRITE,
181 ATA_CMD_READ_EXT,
182 ATA_CMD_WRITE_EXT,
183 0,
184 0,
185 0,
186 ATA_CMD_WRITE_FUA_EXT
187 };
188
189 /**
190 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
191 * @qc: command to examine and configure
192 *
193 * Examine the device configuration and tf->flags to calculate
194 * the proper read/write commands and protocol to use.
195 *
196 * LOCKING:
197 * caller.
198 */
199 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
200 {
201 struct ata_taskfile *tf = &qc->tf;
202 struct ata_device *dev = qc->dev;
203 u8 cmd;
204
205 int index, fua, lba48, write;
206
207 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
208 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
209 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
210
211 if (dev->flags & ATA_DFLAG_PIO) {
212 tf->protocol = ATA_PROT_PIO;
213 index = dev->multi_count ? 0 : 8;
214 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
215 /* Unable to use DMA due to host limitation */
216 tf->protocol = ATA_PROT_PIO;
217 index = dev->multi_count ? 0 : 8;
218 } else {
219 tf->protocol = ATA_PROT_DMA;
220 index = 16;
221 }
222
223 cmd = ata_rw_cmds[index + fua + lba48 + write];
224 if (cmd) {
225 tf->command = cmd;
226 return 0;
227 }
228 return -1;
229 }
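/*
 * Worked example (added for clarity): a DMA LBA48 FUA write yields
 * index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, which
 * selects ATA_CMD_WRITE_FUA_EXT from ata_rw_cmds[] above.
 */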
230
231 /**
232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
233 * @pio_mask: pio_mask
234 * @mwdma_mask: mwdma_mask
235 * @udma_mask: udma_mask
236 *
237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
238 * unsigned int xfer_mask.
239 *
240 * LOCKING:
241 * None.
242 *
243 * RETURNS:
244 * Packed xfer_mask.
245 */
246 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
249 {
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
253 }
254
255 /**
256 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
257 * @xfer_mask: xfer_mask to unpack
258 * @pio_mask: resulting pio_mask
259 * @mwdma_mask: resulting mwdma_mask
260 * @udma_mask: resulting udma_mask
261 *
262 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
263 * Any NULL destination masks will be ignored.
264 */
265 static void ata_unpack_xfermask(unsigned int xfer_mask,
266 unsigned int *pio_mask,
267 unsigned int *mwdma_mask,
268 unsigned int *udma_mask)
269 {
270 if (pio_mask)
271 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
272 if (mwdma_mask)
273 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
274 if (udma_mask)
275 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
276 }
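/*
 * Illustrative round-trip of the two helpers above (values chosen for
 * the example only):
 *
 *	unsigned int xfer_mask, pio, mwdma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, NULL);
 *
 * pio is now 0x1f (PIO0-4) and mwdma 0x07 (MWDMA0-2); the NULL
 * udma_mask argument simply discards the UDMA portion.
 */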
277
278 static const struct ata_xfer_ent {
279 unsigned int shift, bits;
280 u8 base;
281 } ata_xfer_tbl[] = {
282 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
283 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
284 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
285 { -1, },
286 };
287
288 /**
289 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
290 * @xfer_mask: xfer_mask of interest
291 *
292 * Return matching XFER_* value for @xfer_mask. Only the highest
293 * bit of @xfer_mask is considered.
294 *
295 * LOCKING:
296 * None.
297 *
298 * RETURNS:
299 * Matching XFER_* value, 0 if no match found.
300 */
301 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
302 {
303 int highbit = fls(xfer_mask) - 1;
304 const struct ata_xfer_ent *ent;
305
306 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
307 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
308 return ent->base + highbit - ent->shift;
309 return 0;
310 }
311
312 /**
313 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
314 * @xfer_mode: XFER_* of interest
315 *
316 * Return matching xfer_mask for @xfer_mode.
317 *
318 * LOCKING:
319 * None.
320 *
321 * RETURNS:
322 * Matching xfer_mask, 0 if no match found.
323 */
324 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
325 {
326 const struct ata_xfer_ent *ent;
327
328 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
329 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
330 return 1 << (ent->shift + xfer_mode - ent->base);
331 return 0;
332 }
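/*
 * Example of the two lookups above: a mask with only the UDMA5 bit
 * set maps to the mode value and back:
 *
 *	ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 5)) == XFER_UDMA_5
 *	ata_xfer_mode2mask(XFER_UDMA_5) == 1 << (ATA_SHIFT_UDMA + 5)
 */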
333
334 /**
335 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
336 * @xfer_mode: XFER_* of interest
337 *
338 * Return matching xfer_shift for @xfer_mode.
339 *
340 * LOCKING:
341 * None.
342 *
343 * RETURNS:
344 * Matching xfer_shift, -1 if no match found.
345 */
346 static int ata_xfer_mode2shift(unsigned int xfer_mode)
347 {
348 const struct ata_xfer_ent *ent;
349
350 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
351 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
352 return ent->shift;
353 return -1;
354 }
355
356 /**
357 * ata_mode_string - convert xfer_mask to string
358 * @xfer_mask: mask of bits supported; only highest bit counts.
359 *
360 * Determine string which represents the highest speed
361 * (highest bit in @xfer_mask).
362 *
363 * LOCKING:
364 * None.
365 *
366 * RETURNS:
367 * Constant C string representing highest speed listed in
368 * @xfer_mask, or the constant C string "<n/a>".
369 */
370 static const char *ata_mode_string(unsigned int xfer_mask)
371 {
372 static const char * const xfer_mode_str[] = {
373 "PIO0",
374 "PIO1",
375 "PIO2",
376 "PIO3",
377 "PIO4",
378 "MWDMA0",
379 "MWDMA1",
380 "MWDMA2",
381 "UDMA/16",
382 "UDMA/25",
383 "UDMA/33",
384 "UDMA/44",
385 "UDMA/66",
386 "UDMA/100",
387 "UDMA/133",
388 "UDMA7",
389 };
390 int highbit;
391
392 highbit = fls(xfer_mask) - 1;
393 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
394 return xfer_mode_str[highbit];
395 return "<n/a>";
396 }
397
398 /**
399 * ata_pio_devchk - PATA device presence detection
400 * @ap: ATA channel to examine
401 * @device: Device to examine (starting at zero)
402 *
403 * This technique was originally described in
404 * Hale Landis's ATADRVR (www.ata-atapi.com), and
405 * later found its way into the ATA/ATAPI spec.
406 *
407 * Write a pattern to the ATA shadow registers,
408 * and if a device is present, it will respond by
409 * correctly storing and echoing back the
410 * ATA shadow register contents.
411 *
412 * LOCKING:
413 * caller.
414 */
415
416 static unsigned int ata_pio_devchk(struct ata_port *ap,
417 unsigned int device)
418 {
419 struct ata_ioports *ioaddr = &ap->ioaddr;
420 u8 nsect, lbal;
421
422 ap->ops->dev_select(ap, device);
423
424 outb(0x55, ioaddr->nsect_addr);
425 outb(0xaa, ioaddr->lbal_addr);
426
427 outb(0xaa, ioaddr->nsect_addr);
428 outb(0x55, ioaddr->lbal_addr);
429
430 outb(0x55, ioaddr->nsect_addr);
431 outb(0xaa, ioaddr->lbal_addr);
432
433 nsect = inb(ioaddr->nsect_addr);
434 lbal = inb(ioaddr->lbal_addr);
435
436 if ((nsect == 0x55) && (lbal == 0xaa))
437 return 1; /* we found a device */
438
439 return 0; /* nothing found */
440 }
441
442 /**
443 * ata_mmio_devchk - PATA device presence detection
444 * @ap: ATA channel to examine
445 * @device: Device to examine (starting at zero)
446 *
447 * This technique was originally described in
448 * Hale Landis's ATADRVR (www.ata-atapi.com), and
449 * later found its way into the ATA/ATAPI spec.
450 *
451 * Write a pattern to the ATA shadow registers,
452 * and if a device is present, it will respond by
453 * correctly storing and echoing back the
454 * ATA shadow register contents.
455 *
456 * LOCKING:
457 * caller.
458 */
459
460 static unsigned int ata_mmio_devchk(struct ata_port *ap,
461 unsigned int device)
462 {
463 struct ata_ioports *ioaddr = &ap->ioaddr;
464 u8 nsect, lbal;
465
466 ap->ops->dev_select(ap, device);
467
468 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
469 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
470
471 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
472 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
473
474 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
475 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
476
477 nsect = readb((void __iomem *) ioaddr->nsect_addr);
478 lbal = readb((void __iomem *) ioaddr->lbal_addr);
479
480 if ((nsect == 0x55) && (lbal == 0xaa))
481 return 1; /* we found a device */
482
483 return 0; /* nothing found */
484 }
485
486 /**
487 * ata_devchk - PATA device presence detection
488 * @ap: ATA channel to examine
489 * @device: Device to examine (starting at zero)
490 *
491 * Dispatch ATA device presence detection, depending
492 * on whether we are using PIO or MMIO to talk to the
493 * ATA shadow registers.
494 *
495 * LOCKING:
496 * caller.
497 */
498
499 static unsigned int ata_devchk(struct ata_port *ap,
500 unsigned int device)
501 {
502 if (ap->flags & ATA_FLAG_MMIO)
503 return ata_mmio_devchk(ap, device);
504 return ata_pio_devchk(ap, device);
505 }
506
507 /**
508 * ata_dev_classify - determine device type based on ATA-spec signature
509 * @tf: ATA taskfile register set for device to be identified
510 *
511 * Determine from taskfile register contents whether a device is
512 * ATA or ATAPI, as per "Signature and persistence" section
513 * of ATA/PI spec (volume 1, sect 5.14).
514 *
515 * LOCKING:
516 * None.
517 *
518 * RETURNS:
519 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
520 * in the event of failure.
521 */
522
523 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
524 {
525 /* Apple's open source Darwin code hints that some devices only
526 * put a proper signature into the LBA mid/high registers,
527 * so we only check those; it's sufficient for uniqueness.
528 */
529
530 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
531 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
532 DPRINTK("found ATA device by sig\n");
533 return ATA_DEV_ATA;
534 }
535
536 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
537 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
538 DPRINTK("found ATAPI device by sig\n");
539 return ATA_DEV_ATAPI;
540 }
541
542 DPRINTK("unknown device\n");
543 return ATA_DEV_UNKNOWN;
544 }
545
546 /**
547 * ata_dev_try_classify - Parse returned ATA device signature
548 * @ap: ATA channel to examine
549 * @device: Device to examine (starting at zero)
550 * @r_err: Value of error register on completion
551 *
552 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
553 * an ATA/ATAPI-defined set of values is placed in the ATA
554 * shadow registers, indicating the results of device detection
555 * and diagnostics.
556 *
557 * Select the ATA device, and read the values from the ATA shadow
558 * registers. Then parse according to the Error register value,
559 * and the spec-defined values examined by ata_dev_classify().
560 *
561 * LOCKING:
562 * caller.
563 *
564 * RETURNS:
565 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
566 */
567
568 static unsigned int
569 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
570 {
571 struct ata_taskfile tf;
572 unsigned int class;
573 u8 err;
574
575 ap->ops->dev_select(ap, device);
576
577 memset(&tf, 0, sizeof(tf));
578
579 ap->ops->tf_read(ap, &tf);
580 err = tf.feature;
581 if (r_err)
582 *r_err = err;
583
584 /* see if device passed diags */
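/* Diagnostic codes per the ATA spec: 1 means this device passed;
 * 0x81 on device 0 means device 0 passed but device 1 failed,
 * so device 0 is still usable.
 */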
585 if (err == 1)
586 /* do nothing */ ;
587 else if ((device == 0) && (err == 0x81))
588 /* do nothing */ ;
589 else
590 return ATA_DEV_NONE;
591
592 /* determine if device is ATA or ATAPI */
593 class = ata_dev_classify(&tf);
594
595 if (class == ATA_DEV_UNKNOWN)
596 return ATA_DEV_NONE;
597 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
598 return ATA_DEV_NONE;
599 return class;
600 }
601
602 /**
603 * ata_id_string - Convert IDENTIFY DEVICE page into string
604 * @id: IDENTIFY DEVICE results we will examine
605 * @s: string into which data is output
606 * @ofs: offset into identify device page
607 * @len: length of string to return. Must be an even number.
608 *
609 * The strings in the IDENTIFY DEVICE page are broken up into
610 * 16-bit chunks. Run through the string, and output each
611 * 8-bit chunk linearly, regardless of platform.
612 *
613 * LOCKING:
614 * caller.
615 */
616
617 void ata_id_string(const u16 *id, unsigned char *s,
618 unsigned int ofs, unsigned int len)
619 {
620 unsigned int c;
621
622 while (len > 0) {
623 c = id[ofs] >> 8;
624 *s = c;
625 s++;
626
627 c = id[ofs] & 0xff;
628 *s = c;
629 s++;
630
631 ofs++;
632 len -= 2;
633 }
634 }
635
636 /**
637 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
638 * @id: IDENTIFY DEVICE results we will examine
639 * @s: string into which data is output
640 * @ofs: offset into identify device page
641 * @len: length of string to return. Must be an odd number.
642 *
643 * This function is identical to ata_id_string except that it
644 * trims trailing spaces and terminates the resulting string with
645 * null. @len must be actual maximum length (even number) + 1.
646 *
647 * LOCKING:
648 * caller.
649 */
650 void ata_id_c_string(const u16 *id, unsigned char *s,
651 unsigned int ofs, unsigned int len)
652 {
653 unsigned char *p;
654
655 WARN_ON(!(len & 1));
656
657 ata_id_string(id, s, ofs, len - 1);
658
659 p = s + strnlen(s, len - 1);
660 while (p > s && p[-1] == ' ')
661 p--;
662 *p = '\0';
663 }
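/*
 * Illustrative use of ata_id_c_string() (word 27 is the start of the
 * 40-character model string in the IDENTIFY page):
 *
 *	unsigned char model[41];	(41 = 40 chars + terminating null)
 *
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 *
 * sizeof(model) is odd, satisfying the WARN_ON() above.
 */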
664
665 static u64 ata_id_n_sectors(const u16 *id)
666 {
667 if (ata_id_has_lba(id)) {
668 if (ata_id_has_lba48(id))
669 return ata_id_u64(id, 100);
670 else
671 return ata_id_u32(id, 60);
672 } else {
673 if (ata_id_current_chs_valid(id))
674 return ata_id_u32(id, 57);
675 else
676 return id[1] * id[3] * id[6];
677 }
678 }
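/*
 * Capacity selection above, by example: an LBA48 drive reports its
 * sector count in words 100-103 (e.g. 234441648 sectors for a 120 GB
 * disk), an LBA drive in words 60-61, and a CHS-only drive via either
 * the current translation (words 57-58) or the default C*H*S product.
 */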
679
680 /**
681 * ata_noop_dev_select - Select device 0/1 on ATA bus
682 * @ap: ATA channel to manipulate
683 * @device: ATA device (numbered from zero) to select
684 *
685 * This function intentionally does nothing.
686 *
687 * May be used as the dev_select() entry in ata_port_operations.
688 *
689 * LOCKING:
690 * caller.
691 */
692 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
693 {
694 }
695
696
697 /**
698 * ata_std_dev_select - Select device 0/1 on ATA bus
699 * @ap: ATA channel to manipulate
700 * @device: ATA device (numbered from zero) to select
701 *
702 * Use the method defined in the ATA specification to
703 * make either device 0, or device 1, active on the
704 * ATA channel. Works with both PIO and MMIO.
705 *
706 * May be used as the dev_select() entry in ata_port_operations.
707 *
708 * LOCKING:
709 * caller.
710 */
711
712 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
713 {
714 u8 tmp;
715
716 if (device == 0)
717 tmp = ATA_DEVICE_OBS;
718 else
719 tmp = ATA_DEVICE_OBS | ATA_DEV1;
720
721 if (ap->flags & ATA_FLAG_MMIO) {
722 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
723 } else {
724 outb(tmp, ap->ioaddr.device_addr);
725 }
726 ata_pause(ap); /* needed; also flushes, for mmio */
727 }
728
729 /**
730 * ata_dev_select - Select device 0/1 on ATA bus
731 * @ap: ATA channel to manipulate
732 * @device: ATA device (numbered from zero) to select
733 * @wait: non-zero to wait for Status register BSY bit to clear
734 * @can_sleep: non-zero if context allows sleeping
735 *
736 * Use the method defined in the ATA specification to
737 * make either device 0, or device 1, active on the
738 * ATA channel.
739 *
740 * This is a high-level version of ata_std_dev_select(),
741 * which additionally provides the services of inserting
742 * the proper pauses and status polling, where needed.
743 *
744 * LOCKING:
745 * caller.
746 */
747
748 void ata_dev_select(struct ata_port *ap, unsigned int device,
749 unsigned int wait, unsigned int can_sleep)
750 {
751 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
752 ap->id, device, wait);
753
754 if (wait)
755 ata_wait_idle(ap);
756
757 ap->ops->dev_select(ap, device);
758
759 if (wait) {
760 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
761 msleep(150);
762 ata_wait_idle(ap);
763 }
764 }
765
766 /**
767 * ata_dump_id - IDENTIFY DEVICE info debugging output
768 * @id: IDENTIFY DEVICE page to dump
769 *
770 * Dump selected 16-bit words from the given IDENTIFY DEVICE
771 * page.
772 *
773 * LOCKING:
774 * caller.
775 */
776
777 static inline void ata_dump_id(const u16 *id)
778 {
779 DPRINTK("49==0x%04x "
780 "53==0x%04x "
781 "63==0x%04x "
782 "64==0x%04x "
783 "75==0x%04x \n",
784 id[49],
785 id[53],
786 id[63],
787 id[64],
788 id[75]);
789 DPRINTK("80==0x%04x "
790 "81==0x%04x "
791 "82==0x%04x "
792 "83==0x%04x "
793 "84==0x%04x \n",
794 id[80],
795 id[81],
796 id[82],
797 id[83],
798 id[84]);
799 DPRINTK("88==0x%04x "
800 "93==0x%04x\n",
801 id[88],
802 id[93]);
803 }
804
805 /**
806 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
807 * @id: IDENTIFY data to compute xfer mask from
808 *
809 * Compute the xfermask for this device. This is not as trivial
810 * as it seems if we must consider early devices correctly.
811 *
812 * FIXME: pre-IDE drive timing (do we care?).
813 *
814 * LOCKING:
815 * None.
816 *
817 * RETURNS:
818 * Computed xfermask
819 */
820 static unsigned int ata_id_xfermask(const u16 *id)
821 {
822 unsigned int pio_mask, mwdma_mask, udma_mask;
823
824 /* Usual case. Word 53 indicates word 64 is valid */
825 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
826 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
827 pio_mask <<= 3;
828 pio_mask |= 0x7;
829 } else {
830 /* If word 64 isn't valid then Word 51 high byte holds
831 * the PIO timing number for the maximum. Turn it into
832 * a mask.
833 */
834 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;
835
836 /* But wait.. there's more. Design your standards by
837 * committee and you too can get a free iordy field to
838 * process. However, it's the speeds, not the modes, that
839 * are supported... Note that drivers using the timing API
840 * will get this right anyway.
841 */
842 }
843
844 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
845
846 udma_mask = 0;
847 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
848 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
849
850 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
851 }
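/*
 * Worked example for the usual case above: a drive supporting PIO
 * modes 3 and 4 sets word 64 to 0x0003; shifting left by 3 and OR-ing
 * in 0x7 (modes 0-2 are always supported) gives pio_mask 0x1f, i.e.
 * PIO0-4.
 */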
852
853 /**
854 * ata_port_queue_task - Queue port_task
855 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value to pass to workqueue function
 * @delay: delay time for workqueue function
856 *
857 * Schedule @fn(@data) for execution after @delay jiffies using
858 * port_task. There is one port_task per port and it's the
859 * user's (low-level driver's) responsibility to make sure that only
860 * one task is active at any given time.
861 *
862 * libata core layer takes care of synchronization between
863 * port_task and EH. ata_port_queue_task() may be ignored for EH
864 * synchronization.
865 *
866 * LOCKING:
867 * Inherited from caller.
868 */
869 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
870 unsigned long delay)
871 {
872 int rc;
873
874 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
875 return;
876
877 PREPARE_WORK(&ap->port_task, fn, data);
878
879 if (!delay)
880 rc = queue_work(ata_wq, &ap->port_task);
881 else
882 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
883
884 /* rc == 0 means that another user is using port task */
885 WARN_ON(rc == 0);
886 }
887
888 /**
889 * ata_port_flush_task - Flush port_task
890 * @ap: The ata_port to flush port_task for
891 *
892 * After this function completes, port_task is guaranteed not to
893 * be running or scheduled.
894 *
895 * LOCKING:
896 * Kernel thread context (may sleep)
897 */
898 void ata_port_flush_task(struct ata_port *ap)
899 {
900 unsigned long flags;
901
902 DPRINTK("ENTER\n");
903
904 spin_lock_irqsave(&ap->host_set->lock, flags);
905 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
906 spin_unlock_irqrestore(&ap->host_set->lock, flags);
907
908 DPRINTK("flush #1\n");
909 flush_workqueue(ata_wq);
910
911 /*
912 * At this point, if a task is running, it's guaranteed to see
913 * the FLUSH flag; thus, it will never queue pio tasks again.
914 * Cancel and flush.
915 */
916 if (!cancel_delayed_work(&ap->port_task)) {
917 DPRINTK("flush #2\n");
918 flush_workqueue(ata_wq);
919 }
920
921 spin_lock_irqsave(&ap->host_set->lock, flags);
922 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
923 spin_unlock_irqrestore(&ap->host_set->lock, flags);
924
925 DPRINTK("EXIT\n");
926 }
927
928 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
929 {
930 struct completion *waiting = qc->private_data;
931
932 qc->ap->ops->tf_read(qc->ap, &qc->tf);
933 complete(waiting);
934 }
935
936 /**
937 * ata_exec_internal - execute libata internal command
938 * @ap: Port to which the command is sent
939 * @dev: Device to which the command is sent
940 * @tf: Taskfile registers for the command and the result
941 * @dma_dir: Data transfer direction of the command
942 * @buf: Data buffer of the command
943 * @buflen: Length of data buffer
944 *
945 * Executes libata internal command with timeout. @tf contains
946 * command on entry and result on return. Timeout and error
947 * conditions are reported via return value. No recovery action
948 * is taken after a command times out. It is the caller's duty
949 * to clean up after a timeout.
950 *
951 * LOCKING:
952 * None. Should be called with kernel context, might sleep.
953 */
954
955 static unsigned
956 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
957 struct ata_taskfile *tf,
958 int dma_dir, void *buf, unsigned int buflen)
959 {
960 u8 command = tf->command;
961 struct ata_queued_cmd *qc;
962 DECLARE_COMPLETION(wait);
963 unsigned long flags;
964 unsigned int err_mask;
965
966 spin_lock_irqsave(&ap->host_set->lock, flags);
967
968 qc = ata_qc_new_init(ap, dev);
969 BUG_ON(qc == NULL);
970
971 qc->tf = *tf;
972 qc->dma_dir = dma_dir;
973 if (dma_dir != DMA_NONE) {
974 ata_sg_init_one(qc, buf, buflen);
975 qc->nsect = buflen / ATA_SECT_SIZE;
976 }
977
978 qc->private_data = &wait;
979 qc->complete_fn = ata_qc_complete_internal;
980
981 qc->err_mask = ata_qc_issue(qc);
982 if (qc->err_mask)
983 ata_qc_complete(qc);
984
985 spin_unlock_irqrestore(&ap->host_set->lock, flags);
986
987 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
988 ata_port_flush_task(ap);
989
990 spin_lock_irqsave(&ap->host_set->lock, flags);
991
992 /* We're racing with irq here. If we lose, the
993 * following test prevents us from completing the qc
994 * again. If completion irq occurs after here but
995 * before the caller cleans up, it will result in a
996 * spurious interrupt. We can live with that.
997 */
998 if (qc->flags & ATA_QCFLAG_ACTIVE) {
999 qc->err_mask = AC_ERR_TIMEOUT;
1000 ata_qc_complete(qc);
1001 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1002 ap->id, command);
1003 }
1004
1005 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1006 }
1007
1008 *tf = qc->tf;
1009 err_mask = qc->err_mask;
1010
1011 ata_qc_free(qc);
1012
1013 return err_mask;
1014 }
1015
1016 /**
1017 * ata_pio_need_iordy - check if iordy needed
1018 * @adev: ATA device
1019 *
1020 * Check if the current speed of the device requires IORDY. Used
1021 * by various controllers for chip configuration.
1022 */
1023
1024 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1025 {
1026 int pio;
1027 int speed = adev->pio_mode - XFER_PIO_0;
1028
1029 if (speed < 2)
1030 return 0;
1031 if (speed > 2)
1032 return 1;
1033
1034 /* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1035
1036 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1037 pio = adev->id[ATA_ID_EIDE_PIO];
1038 /* Is the speed faster than the drive allows non-IORDY? */
1039 if (pio) {
1040 /* These are cycle times, not frequencies - watch the logic! */
1041 if (pio > 240) /* PIO2 is 240nS per cycle */
1042 return 1;
1043 return 0;
1044 }
1045 }
1046 return 0;
1047 }
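/*
 * Sketch of a typical caller (hypothetical chip and register bit): a
 * controller's ->set_piomode() would do something like
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= CHIP_IORDY_EN;
 *
 * before programming its timing register.
 */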
1048
1049 /**
1050 * ata_dev_read_id - Read ID data from the specified device
1051 * @ap: port on which target device resides
1052 * @dev: target device
1053 * @p_class: pointer to class of the target device (may be changed)
1054 * @post_reset: is this read ID post-reset?
1055 * @p_id: read IDENTIFY page (newly allocated)
1056 *
1057 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1058 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1059 * devices. This function also takes care of EDD signature
1060 * misreporting (to be removed once EDD support is gone) and
1061 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1062 *
1063 * LOCKING:
1064 * Kernel thread context (may sleep)
1065 *
1066 * RETURNS:
1067 * 0 on success, -errno otherwise.
1068 */
1069 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1070 unsigned int *p_class, int post_reset, u16 **p_id)
1071 {
1072 unsigned int class = *p_class;
1073 unsigned int using_edd;
1074 struct ata_taskfile tf;
1075 unsigned int err_mask = 0;
1076 u16 *id;
1077 const char *reason;
1078 int rc;
1079
1080 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1081
1082 if (ap->ops->probe_reset ||
1083 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1084 using_edd = 0;
1085 else
1086 using_edd = 1;
1087
1088 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1089
1090 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1091 if (id == NULL) {
1092 rc = -ENOMEM;
1093 reason = "out of memory";
1094 goto err_out;
1095 }
1096
1097 retry:
1098 ata_tf_init(ap, &tf, dev->devno);
1099
1100 switch (class) {
1101 case ATA_DEV_ATA:
1102 tf.command = ATA_CMD_ID_ATA;
1103 break;
1104 case ATA_DEV_ATAPI:
1105 tf.command = ATA_CMD_ID_ATAPI;
1106 break;
1107 default:
1108 rc = -ENODEV;
1109 reason = "unsupported class";
1110 goto err_out;
1111 }
1112
1113 tf.protocol = ATA_PROT_PIO;
1114
1115 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1116 id, sizeof(id[0]) * ATA_ID_WORDS);
1117
1118 if (err_mask) {
1119 rc = -EIO;
1120 reason = "I/O error";
1121
1122 if (err_mask & ~AC_ERR_DEV)
1123 goto err_out;
1124
1125 /*
1126 * arg! EDD works for all test cases, but seems to return
1127 * the ATA signature for some ATAPI devices. Until the
1128 * reason for this is found and fixed, we fix up the mess
1129 * here. If IDENTIFY DEVICE returns command aborted
1130 * (as ATAPI devices do), then we issue an
1131 * IDENTIFY PACKET DEVICE.
1132 *
1133 * ATA software reset (SRST, the default) does not appear
1134 * to have this problem.
1135 */
1136 if ((using_edd) && (class == ATA_DEV_ATA)) {
1137 u8 err = tf.feature;
1138 if (err & ATA_ABORTED) {
1139 class = ATA_DEV_ATAPI;
1140 goto retry;
1141 }
1142 }
1143 goto err_out;
1144 }
1145
1146 swap_buf_le16(id, ATA_ID_WORDS);
1147
1148 /* sanity check */
1149 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1150 rc = -EINVAL;
1151 reason = "device reports illegal type";
1152 goto err_out;
1153 }
1154
1155 if (post_reset && class == ATA_DEV_ATA) {
1156 /*
1157 * The exact sequence expected by certain pre-ATA4 drives is:
1158 * SRST RESET
1159 * IDENTIFY
1160 * INITIALIZE DEVICE PARAMETERS
1161 * anything else..
1162 * Some drives were very specific about that exact sequence.
1163 */
1164 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1165 err_mask = ata_dev_init_params(ap, dev);
1166 if (err_mask) {
1167 rc = -EIO;
1168 reason = "INIT_DEV_PARAMS failed";
1169 goto err_out;
1170 }
1171
1172 /* current CHS translation info (id[53-58]) might be
1173 * changed. reread the identify device info.
1174 */
1175 post_reset = 0;
1176 goto retry;
1177 }
1178 }
1179
1180 *p_class = class;
1181 *p_id = id;
1182 return 0;
1183
1184 err_out:
1185 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1186 ap->id, dev->devno, reason);
1187 kfree(id);
1188 return rc;
1189 }
1190
1191 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1192 struct ata_device *dev)
1193 {
1194 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1195 }
1196
1197 /**
1198 * ata_dev_configure - Configure the specified ATA/ATAPI device
1199 * @ap: Port on which target device resides
1200 * @dev: Target device to configure
1201 * @print_info: Enable device info printout
1202 *
1203 * Configure @dev according to @dev->id. Generic and low-level
1204 * driver specific fixups are also applied.
1205 *
1206 * LOCKING:
1207 * Kernel thread context (may sleep)
1208 *
1209 * RETURNS:
1210 * 0 on success, -errno otherwise
1211 */
1212 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1213 int print_info)
1214 {
1215 const u16 *id = dev->id;
1216 unsigned int xfer_mask;
1217 int i, rc;
1218
1219 if (!ata_dev_present(dev)) {
1220 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1221 ap->id, dev->devno);
1222 return 0;
1223 }
1224
1225 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1226
1227 /* print device capabilities */
1228 if (print_info)
1229 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1230 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1231 ap->id, dev->devno, id[49], id[82], id[83],
1232 id[84], id[85], id[86], id[87], id[88]);
1233
1234 /* initialize to-be-configured parameters */
1235 dev->flags = 0;
1236 dev->max_sectors = 0;
1237 dev->cdb_len = 0;
1238 dev->n_sectors = 0;
1239 dev->cylinders = 0;
1240 dev->heads = 0;
1241 dev->sectors = 0;
1242
1243 /*
1244 * common ATA, ATAPI feature tests
1245 */
1246
1247 /* find max transfer mode; for printk only */
1248 xfer_mask = ata_id_xfermask(id);
1249
1250 ata_dump_id(id);
1251
1252 /* ATA-specific feature tests */
1253 if (dev->class == ATA_DEV_ATA) {
1254 dev->n_sectors = ata_id_n_sectors(id);
1255
1256 if (ata_id_has_lba(id)) {
1257 const char *lba_desc;
1258
1259 lba_desc = "LBA";
1260 dev->flags |= ATA_DFLAG_LBA;
1261 if (ata_id_has_lba48(id)) {
1262 dev->flags |= ATA_DFLAG_LBA48;
1263 lba_desc = "LBA48";
1264 }
1265
1266 /* print device info to dmesg */
1267 if (print_info)
1268 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1269 "max %s, %Lu sectors: %s\n",
1270 ap->id, dev->devno,
1271 ata_id_major_version(id),
1272 ata_mode_string(xfer_mask),
1273 (unsigned long long)dev->n_sectors,
1274 lba_desc);
1275 } else {
1276 /* CHS */
1277
1278 /* Default translation */
1279 dev->cylinders = id[1];
1280 dev->heads = id[3];
1281 dev->sectors = id[6];
1282
1283 if (ata_id_current_chs_valid(id)) {
1284 /* Current CHS translation is valid. */
1285 dev->cylinders = id[54];
1286 dev->heads = id[55];
1287 dev->sectors = id[56];
1288 }
1289
1290 /* print device info to dmesg */
1291 if (print_info)
1292 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1293 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1294 ap->id, dev->devno,
1295 ata_id_major_version(id),
1296 ata_mode_string(xfer_mask),
1297 (unsigned long long)dev->n_sectors,
1298 dev->cylinders, dev->heads, dev->sectors);
1299 }
1300
1301 dev->cdb_len = 16;
1302 }
1303
1304 /* ATAPI-specific feature tests */
1305 else if (dev->class == ATA_DEV_ATAPI) {
1306 rc = atapi_cdb_len(id);
1307 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1308 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1309 rc = -EINVAL;
1310 goto err_out_nosup;
1311 }
1312 dev->cdb_len = (unsigned int) rc;
1313
1314 /* print device info to dmesg */
1315 if (print_info)
1316 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1317 ap->id, dev->devno, ata_mode_string(xfer_mask));
1318 }
1319
1320 ap->host->max_cmd_len = 0;
1321 for (i = 0; i < ATA_MAX_DEVICES; i++)
1322 ap->host->max_cmd_len = max_t(unsigned int,
1323 ap->host->max_cmd_len,
1324 ap->device[i].cdb_len);
1325
1326 /* limit bridge transfers to udma5, 200 sectors */
1327 if (ata_dev_knobble(ap, dev)) {
1328 if (print_info)
1329 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1330 ap->id, dev->devno);
1331 ap->udma_mask &= ATA_UDMA5;
1332 dev->max_sectors = ATA_MAX_SECTORS;
1333 }
1334
1335 if (ap->ops->dev_config)
1336 ap->ops->dev_config(ap, dev);
1337
1338 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1339 return 0;
1340
1341 err_out_nosup:
1342 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1343 ap->id, dev->devno);
1344 DPRINTK("EXIT, err\n");
1345 return rc;
1346 }
1347
1348 /**
1349 * ata_bus_probe - Reset and probe ATA bus
1350 * @ap: Bus to probe
1351 *
1352 * Master ATA bus probing function. Initiates a hardware-dependent
1353 * bus reset, then attempts to identify any devices found on
1354 * the bus.
1355 *
1356 * LOCKING:
1357 * PCI/etc. bus probe sem.
1358 *
1359 * RETURNS:
1360 * Zero on success, non-zero on error.
1361 */
1362
1363 static int ata_bus_probe(struct ata_port *ap)
1364 {
1365 unsigned int classes[ATA_MAX_DEVICES];
1366 unsigned int i, rc, found = 0;
1367
1368 ata_port_probe(ap);
1369
1370 /* reset and determine device classes */
1371 for (i = 0; i < ATA_MAX_DEVICES; i++)
1372 classes[i] = ATA_DEV_UNKNOWN;
1373
1374 if (ap->ops->probe_reset) {
1375 rc = ap->ops->probe_reset(ap, classes);
1376 if (rc) {
1377 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1378 return rc;
1379 }
1380 } else {
1381 ap->ops->phy_reset(ap);
1382
1383 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1384 for (i = 0; i < ATA_MAX_DEVICES; i++)
1385 classes[i] = ap->device[i].class;
1386
1387 ata_port_probe(ap);
1388 }
1389
1390 for (i = 0; i < ATA_MAX_DEVICES; i++)
1391 if (classes[i] == ATA_DEV_UNKNOWN)
1392 classes[i] = ATA_DEV_NONE;
1393
1394 /* read IDENTIFY page and configure devices */
1395 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1396 struct ata_device *dev = &ap->device[i];
1397
1398 dev->class = classes[i];
1399
1400 if (!ata_dev_present(dev))
1401 continue;
1402
1403 WARN_ON(dev->id != NULL);
1404 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1405 dev->class = ATA_DEV_NONE;
1406 continue;
1407 }
1408
1409 if (ata_dev_configure(ap, dev, 1)) {
1410 dev->class++; /* disable device */
1411 continue;
1412 }
1413
1414 found = 1;
1415 }
1416
1417 if (!found)
1418 goto err_out_disable;
1419
1420 ata_set_mode(ap);
1421 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1422 goto err_out_disable;
1423
1424 return 0;
1425
1426 err_out_disable:
1427 ap->ops->port_disable(ap);
1428 return -1;
1429 }
1430
1431 /**
1432 * ata_port_probe - Mark port as enabled
1433 * @ap: Port for which we indicate enablement
1434 *
1435 * Modify @ap data structure such that the system
1436 * thinks that the entire port is enabled.
1437 *
1438 * LOCKING: host_set lock, or some other form of
1439 * serialization.
1440 */
1441
1442 void ata_port_probe(struct ata_port *ap)
1443 {
1444 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1445 }
1446
1447 /**
1448 * sata_print_link_status - Print SATA link status
1449 * @ap: SATA port to printk link status about
1450 *
1451 * This function prints link speed and status of a SATA link.
1452 *
1453 * LOCKING:
1454 * None.
1455 */
1456 static void sata_print_link_status(struct ata_port *ap)
1457 {
1458 u32 sstatus, tmp;
1459 const char *speed;
1460
1461 if (!ap->ops->scr_read)
1462 return;
1463
1464 sstatus = scr_read(ap, SCR_STATUS);
1465
1466 if (sata_dev_present(ap)) {
1467 tmp = (sstatus >> 4) & 0xf;
1468 if (tmp & (1 << 0))
1469 speed = "1.5";
1470 else if (tmp & (1 << 1))
1471 speed = "3.0";
1472 else
1473 speed = "<unknown>";
1474 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1475 ap->id, speed, sstatus);
1476 } else {
1477 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1478 ap->id, sstatus);
1479 }
1480 }
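/*
 * SStatus decoding used above: bits 3:0 (DET) indicate phy/device
 * state (3 == device present and phy online), and bits 7:4 (SPD) the
 * negotiated speed (1 == Gen1 1.5 Gbps, 2 == Gen2 3.0 Gbps).
 */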
1481
1482 /**
1483 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1484 * @ap: SATA port associated with target SATA PHY.
1485 *
1486 * This function issues commands to standard SATA Sxxx
1487 * PHY registers, to wake up the phy (and device), and
1488 * clear any reset condition.
1489 *
1490 * LOCKING:
1491 * PCI/etc. bus probe sem.
1492 *
1493 */
1494 void __sata_phy_reset(struct ata_port *ap)
1495 {
1496 u32 sstatus;
1497 unsigned long timeout = jiffies + (HZ * 5);
1498
1499 if (ap->flags & ATA_FLAG_SATA_RESET) {
1500 /* issue phy wake/reset */
1501 scr_write_flush(ap, SCR_CONTROL, 0x301);
1502 /* Couldn't find anything in SATA I/II specs, but
1503 * AHCI-1.1 10.4.2 says at least 1 ms. */
1504 mdelay(1);
1505 }
1506 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1507
1508 /* wait for phy to become ready, if necessary */
1509 do {
1510 msleep(200);
1511 sstatus = scr_read(ap, SCR_STATUS);
1512 if ((sstatus & 0xf) != 1)
1513 break;
1514 } while (time_before(jiffies, timeout));
1515
1516 /* print link status */
1517 sata_print_link_status(ap);
1518
1519 /* TODO: phy layer with polling, timeouts, etc. */
1520 if (sata_dev_present(ap))
1521 ata_port_probe(ap);
1522 else
1523 ata_port_disable(ap);
1524
1525 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1526 return;
1527
1528 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1529 ata_port_disable(ap);
1530 return;
1531 }
1532
1533 ap->cbl = ATA_CBL_SATA;
1534 }
1535
1536 /**
1537 * sata_phy_reset - Reset SATA bus.
1538 * @ap: SATA port associated with target SATA PHY.
1539 *
1540 * This function resets the SATA bus, and then probes
1541 * the bus for devices.
1542 *
1543 * LOCKING:
1544 * PCI/etc. bus probe sem.
1545 *
1546 */
1547 void sata_phy_reset(struct ata_port *ap)
1548 {
1549 __sata_phy_reset(ap);
1550 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1551 return;
1552 ata_bus_reset(ap);
1553 }
1554
1555 /**
1556 * ata_port_disable - Disable port.
1557 * @ap: Port to be disabled.
1558 *
1559 * Modify @ap data structure such that the system
1560 * thinks that the entire port is disabled, and should
1561 * never attempt to probe or communicate with devices
1562 * on this port.
1563 *
1564 * LOCKING: host_set lock, or some other form of
1565 * serialization.
1566 */
1567
1568 void ata_port_disable(struct ata_port *ap)
1569 {
1570 ap->device[0].class = ATA_DEV_NONE;
1571 ap->device[1].class = ATA_DEV_NONE;
1572 ap->flags |= ATA_FLAG_PORT_DISABLED;
1573 }
1574
1575 /*
1576 * This mode timing computation functionality is ported over from
1577 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1578 */
1579 /*
1580 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1581 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1582 * for PIO 5, which is a nonstandard extension and UDMA6, which
1583 * is currently supported only by Maxtor drives.
1584 */
1585
1586 static const struct ata_timing ata_timing[] = {
1587
1588 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1589 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1590 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1591 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1592
1593 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1594 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1595 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1596
1597 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1598
1599 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1600 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1601 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1602
1603 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1604 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1605 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1606
1607 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1608 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1609 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1610
1611 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1612 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1613 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1614
1615 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1616
1617 { 0xFF }
1618 };
1619
1620 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1621 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
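/*
 * ENOUGH() is a ceiling division: the smallest number of 'unit'-sized
 * clock ticks covering 'v'. For example, ENOUGH(70, 30) == 3, since
 * two 30ns ticks (60ns) do not cover 70ns. EZ() additionally maps an
 * unused field (0) to 0 ticks instead of rounding it up to 1.
 */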
1622
1623 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1624 {
1625 q->setup = EZ(t->setup * 1000, T);
1626 q->act8b = EZ(t->act8b * 1000, T);
1627 q->rec8b = EZ(t->rec8b * 1000, T);
1628 q->cyc8b = EZ(t->cyc8b * 1000, T);
1629 q->active = EZ(t->active * 1000, T);
1630 q->recover = EZ(t->recover * 1000, T);
1631 q->cycle = EZ(t->cycle * 1000, T);
1632 q->udma = EZ(t->udma * 1000, UT);
1633 }
1634
1635 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1636 struct ata_timing *m, unsigned int what)
1637 {
1638 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1639 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1640 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1641 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1642 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1643 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1644 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1645 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1646 }
1647
1648 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1649 {
1650 const struct ata_timing *t;
1651
1652 for (t = ata_timing; t->mode != speed; t++)
1653 if (t->mode == 0xFF)
1654 return NULL;
1655 return t;
1656 }
1657
1658 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1659 struct ata_timing *t, int T, int UT)
1660 {
1661 const struct ata_timing *s;
1662 struct ata_timing p;
1663
1664 /*
1665 * Find the mode.
1666 */
1667
1668 if (!(s = ata_timing_find_mode(speed)))
1669 return -EINVAL;
1670
1671 memcpy(t, s, sizeof(*s));
1672
1673 /*
1674 * If the drive is an EIDE drive, it can tell us it needs extended
1675 * PIO/MW_DMA cycle timing.
1676 */
1677
1678 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1679 memset(&p, 0, sizeof(p));
1680 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1681 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1682 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1683 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1684 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1685 }
1686 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1687 }
1688
1689 /*
1690 * Convert the timing to bus clock counts.
1691 */
1692
1693 ata_timing_quantize(t, t, T, UT);
1694
1695 /*
1696 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1697 * S.M.A.R.T. and some other commands. We have to ensure that the
1698 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1699 */
1700
1701 if (speed > XFER_PIO_4) {
1702 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1703 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1704 }
1705
1706 /*
1707 * Lengthen active & recovery time so that cycle time is correct.
1708 */
1709
1710 if (t->act8b + t->rec8b < t->cyc8b) {
1711 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1712 t->rec8b = t->cyc8b - t->act8b;
1713 }
1714
1715 if (t->active + t->recover < t->cycle) {
1716 t->active += (t->cycle - (t->active + t->recover)) / 2;
1717 t->recover = t->cycle - t->active;
1718 }
1719
1720 return 0;
1721 }
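/*
 * Illustrative caller (program_chip_timing() is hypothetical; the
 * clock math follows the ide-timing convention of T in picoseconds):
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	(~30000 ps per tick at 33 MHz)
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) == 0)
 *		program_chip_timing(ap, adev, &t);
 *
 * UT is only consulted for UDMA modes, so its value is irrelevant
 * when computing a PIO timing as here.
 */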
1722
1723 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1724 {
1725 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1726 return;
1727
1728 if (dev->xfer_shift == ATA_SHIFT_PIO)
1729 dev->flags |= ATA_DFLAG_PIO;
1730
1731 ata_dev_set_xfermode(ap, dev);
1732
1733 if (ata_dev_revalidate(ap, dev, 0)) {
1734 printk(KERN_ERR "ata%u: failed to revalidate after set "
1735 "xfermode, disabled\n", ap->id);
1736 ata_port_disable(ap);
1737 }
1738
1739 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1740 dev->xfer_shift, (int)dev->xfer_mode);
1741
1742 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1743 ap->id, dev->devno,
1744 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1745 }
1746
1747 static int ata_host_set_pio(struct ata_port *ap)
1748 {
1749 int i;
1750
1751 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1752 struct ata_device *dev = &ap->device[i];
1753
1754 if (!ata_dev_present(dev))
1755 continue;
1756
1757 if (!dev->pio_mode) {
1758 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1759 return -1;
1760 }
1761
1762 dev->xfer_mode = dev->pio_mode;
1763 dev->xfer_shift = ATA_SHIFT_PIO;
1764 if (ap->ops->set_piomode)
1765 ap->ops->set_piomode(ap, dev);
1766 }
1767
1768 return 0;
1769 }
1770
1771 static void ata_host_set_dma(struct ata_port *ap)
1772 {
1773 int i;
1774
1775 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1776 struct ata_device *dev = &ap->device[i];
1777
1778 if (!ata_dev_present(dev) || !dev->dma_mode)
1779 continue;
1780
1781 dev->xfer_mode = dev->dma_mode;
1782 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1783 if (ap->ops->set_dmamode)
1784 ap->ops->set_dmamode(ap, dev);
1785 }
1786 }
1787
1788 /**
1789 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1790 * @ap: port on which timings will be programmed
1791 *
1792 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1793 *
1794 * LOCKING:
1795 * PCI/etc. bus probe sem.
1796 */
1797 static void ata_set_mode(struct ata_port *ap)
1798 {
1799 int i, rc;
1800
1801 /* step 1: calculate xfer_mask */
1802 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1803 struct ata_device *dev = &ap->device[i];
1804 unsigned int xfer_mask;
1805
1806 if (!ata_dev_present(dev))
1807 continue;
1808
1809 xfer_mask = ata_dev_xfermask(ap, dev);
1810
1811 dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
1812 dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
1813 ATA_MASK_UDMA));
1814 }
1815
1816 /* step 2: always set host PIO timings */
1817 rc = ata_host_set_pio(ap);
1818 if (rc)
1819 goto err_out;
1820
1821 /* step 3: set host DMA timings */
1822 ata_host_set_dma(ap);
1823
1824 /* step 4: update devices' xfer mode */
1825 for (i = 0; i < ATA_MAX_DEVICES; i++)
1826 ata_dev_set_mode(ap, &ap->device[i]);
1827
1828 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1829 return;
1830
1831 if (ap->ops->post_set_mode)
1832 ap->ops->post_set_mode(ap);
1833
1834 return;
1835
1836 err_out:
1837 ata_port_disable(ap);
1838 }
1839
1840 /**
1841 * ata_tf_to_host - issue ATA taskfile to host controller
1842 * @ap: port to which command is being issued
1843 * @tf: ATA taskfile register set
1844 *
1845 * Issues ATA taskfile register set to ATA host controller,
1846 * with proper synchronization with interrupt handler and
1847 * other threads.
1848 *
1849 * LOCKING:
1850 * spin_lock_irqsave(host_set lock)
1851 */
1852
1853 static inline void ata_tf_to_host(struct ata_port *ap,
1854 const struct ata_taskfile *tf)
1855 {
1856 ap->ops->tf_load(ap, tf);
1857 ap->ops->exec_command(ap, tf);
1858 }
1859
1860 /**
1861 * ata_busy_sleep - sleep until BSY clears, or timeout
1862 * @ap: port containing status register to be polled
1863 * @tmout_pat: impatience timeout
1864 * @tmout: overall timeout
1865 *
1866 * Sleep until ATA Status register bit BSY clears,
1867 * or a timeout occurs.
1868 *
1869 * LOCKING: None.
1870 */
1871
1872 unsigned int ata_busy_sleep (struct ata_port *ap,
1873 unsigned long tmout_pat, unsigned long tmout)
1874 {
1875 unsigned long timer_start, timeout;
1876 u8 status;
1877
1878 status = ata_busy_wait(ap, ATA_BUSY, 300);
1879 timer_start = jiffies;
1880 timeout = timer_start + tmout_pat;
1881 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1882 msleep(50);
1883 status = ata_busy_wait(ap, ATA_BUSY, 3);
1884 }
1885
1886 if (status & ATA_BUSY)
1887 printk(KERN_WARNING "ata%u is slow to respond, "
1888 "please be patient\n", ap->id);
1889
1890 timeout = timer_start + tmout;
1891 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1892 msleep(50);
1893 status = ata_chk_status(ap);
1894 }
1895
1896 if (status & ATA_BUSY) {
1897 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1898 ap->id, tmout / HZ);
1899 return 1;
1900 }
1901
1902 return 0;
1903 }
1904
1905 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1906 {
1907 struct ata_ioports *ioaddr = &ap->ioaddr;
1908 unsigned int dev0 = devmask & (1 << 0);
1909 unsigned int dev1 = devmask & (1 << 1);
1910 unsigned long timeout;
1911
1912 /* if device 0 was found in ata_devchk, wait for its
1913 * BSY bit to clear
1914 */
1915 if (dev0)
1916 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1917
1918 /* if device 1 was found in ata_devchk, wait for
1919 * register access, then wait for BSY to clear
1920 */
1921 timeout = jiffies + ATA_TMOUT_BOOT;
1922 while (dev1) {
1923 u8 nsect, lbal;
1924
1925 ap->ops->dev_select(ap, 1);
1926 if (ap->flags & ATA_FLAG_MMIO) {
1927 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1928 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1929 } else {
1930 nsect = inb(ioaddr->nsect_addr);
1931 lbal = inb(ioaddr->lbal_addr);
1932 }
1933 if ((nsect == 1) && (lbal == 1))
1934 break;
1935 if (time_after(jiffies, timeout)) {
1936 dev1 = 0;
1937 break;
1938 }
1939 msleep(50); /* give drive a breather */
1940 }
1941 if (dev1)
1942 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1943
1944 /* is all this really necessary? */
1945 ap->ops->dev_select(ap, 0);
1946 if (dev1)
1947 ap->ops->dev_select(ap, 1);
1948 if (dev0)
1949 ap->ops->dev_select(ap, 0);
1950 }
1951
1952 /**
1953 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1954 * @ap: Port to reset and probe
1955 *
1956 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1957 * probe the bus. Not often used these days.
1958 *
1959 * LOCKING:
1960 * PCI/etc. bus probe sem.
1961 * Obtains host_set lock.
1962 *
1963 */
1964
1965 static unsigned int ata_bus_edd(struct ata_port *ap)
1966 {
1967 struct ata_taskfile tf;
1968 unsigned long flags;
1969
1970 /* set up execute-device-diag (bus reset) taskfile */
1971 /* also, take interrupts to a known state (disabled) */
1972 DPRINTK("execute-device-diag\n");
1973 ata_tf_init(ap, &tf, 0);
1974 tf.ctl |= ATA_NIEN;
1975 tf.command = ATA_CMD_EDD;
1976 tf.protocol = ATA_PROT_NODATA;
1977
1978 /* do bus reset */
1979 spin_lock_irqsave(&ap->host_set->lock, flags);
1980 ata_tf_to_host(ap, &tf);
1981 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1982
1983 /* spec says at least 2ms, but who knows with those
1984 * crazy ATAPI devices...
1985 */
1986 msleep(150);
1987
1988 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1989 }
1990
1991 static unsigned int ata_bus_softreset(struct ata_port *ap,
1992 unsigned int devmask)
1993 {
1994 struct ata_ioports *ioaddr = &ap->ioaddr;
1995
1996 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1997
1998 /* software reset. causes dev0 to be selected */
1999 if (ap->flags & ATA_FLAG_MMIO) {
2000 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2001 udelay(20); /* FIXME: flush */
2002 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2003 udelay(20); /* FIXME: flush */
2004 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2005 } else {
2006 outb(ap->ctl, ioaddr->ctl_addr);
2007 udelay(10);
2008 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2009 udelay(10);
2010 outb(ap->ctl, ioaddr->ctl_addr);
2011 }
2012
2013 /* spec mandates ">= 2ms" before checking status.
2014 * We wait 150ms, because that was the magic delay used for
2015 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2016 * between when the ATA command register is written, and then
2017 * status is checked. Because waiting for "a while" before
2018 * checking status is fine, post SRST, we perform this magic
2019 * delay here as well.
2020 *
2021 * Old drivers/ide uses the 2ms rule and then waits for ready.
2022 */
2023 msleep(150);
2024
2025
2026 /* Before we perform post reset processing we want to see if
2027 the bus shows 0xFF because the odd clown forgets the D7 pulldown
2028 resistor */
2029
2030 if (ata_check_status(ap) == 0xFF)
2031 return 1; /* Positive is failure for some reason */
2032
2033 ata_bus_post_reset(ap, devmask);
2034
2035 return 0;
2036 }
2037
2038 /**
2039 * ata_bus_reset - reset host port and associated ATA channel
2040 * @ap: port to reset
2041 *
2042 * This is typically the first time we actually start issuing
2043 * commands to the ATA channel. We wait for BSY to clear, then
2044 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2045 * result. Determine what devices, if any, are on the channel
2046 * by looking at the device 0/1 error register. Look at the signature
2047 * stored in each device's taskfile registers, to determine if
2048 * the device is ATA or ATAPI.
2049 *
2050 * LOCKING:
2051 * PCI/etc. bus probe sem.
2052 * Obtains host_set lock.
2053 *
2054 * SIDE EFFECTS:
2055 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2056 */
2057
2058 void ata_bus_reset(struct ata_port *ap)
2059 {
2060 struct ata_ioports *ioaddr = &ap->ioaddr;
2061 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2062 u8 err;
2063 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
2064
2065 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2066
2067 /* determine if device 0/1 are present */
2068 if (ap->flags & ATA_FLAG_SATA_RESET)
2069 dev0 = 1;
2070 else {
2071 dev0 = ata_devchk(ap, 0);
2072 if (slave_possible)
2073 dev1 = ata_devchk(ap, 1);
2074 }
2075
2076 if (dev0)
2077 devmask |= (1 << 0);
2078 if (dev1)
2079 devmask |= (1 << 1);
2080
2081 /* select device 0 again */
2082 ap->ops->dev_select(ap, 0);
2083
2084 /* issue bus reset */
2085 if (ap->flags & ATA_FLAG_SRST)
2086 rc = ata_bus_softreset(ap, devmask);
2087 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
2088 /* set up device control */
2089 if (ap->flags & ATA_FLAG_MMIO)
2090 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2091 else
2092 outb(ap->ctl, ioaddr->ctl_addr);
2093 rc = ata_bus_edd(ap);
2094 }
2095
2096 if (rc)
2097 goto err_out;
2098
2099 /*
2100 * determine by signature whether we have ATA or ATAPI devices
2101 */
2102 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2103 if ((slave_possible) && (err != 0x81))
2104 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2105
2106 /* re-enable interrupts */
2107 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2108 ata_irq_on(ap);
2109
2110 /* is double-select really necessary? */
2111 if (ap->device[1].class != ATA_DEV_NONE)
2112 ap->ops->dev_select(ap, 1);
2113 if (ap->device[0].class != ATA_DEV_NONE)
2114 ap->ops->dev_select(ap, 0);
2115
2116 /* if no devices were detected, disable this port */
2117 if ((ap->device[0].class == ATA_DEV_NONE) &&
2118 (ap->device[1].class == ATA_DEV_NONE))
2119 goto err_out;
2120
2121 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2122 /* set up device control for ATA_FLAG_SATA_RESET */
2123 if (ap->flags & ATA_FLAG_MMIO)
2124 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2125 else
2126 outb(ap->ctl, ioaddr->ctl_addr);
2127 }
2128
2129 DPRINTK("EXIT\n");
2130 return;
2131
2132 err_out:
2133 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2134 ap->ops->port_disable(ap);
2135
2136 DPRINTK("EXIT\n");
2137 }
2138
2139 static int sata_phy_resume(struct ata_port *ap)
2140 {
2141 unsigned long timeout = jiffies + (HZ * 5);
2142 u32 sstatus;
2143
2144 scr_write_flush(ap, SCR_CONTROL, 0x300);
2145
2146 /* Wait for phy to become ready, if necessary. */
2147 do {
2148 msleep(200);
2149 sstatus = scr_read(ap, SCR_STATUS);
2150 if ((sstatus & 0xf) != 1)
2151 return 0;
2152 } while (time_before(jiffies, timeout));
2153
2154 return -1;
2155 }
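
/*
 * Reader's note on the SStatus test above: bits 3:0 of SStatus form
 * the DET field.  DET == 1 means a device was detected but phy
 * communication is not yet established, so the loop keeps waiting
 * only while that transitional state persists; any other value
 * (e.g. 0 == no device, 3 == device present and phy online) ends
 * the wait immediately.
 */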
2156
2157 /**
2158 * ata_std_probeinit - initialize probing
2159 * @ap: port to be probed
2160 *
2161 * @ap is about to be probed. Initialize it. This function is
2162 * to be used as standard callback for ata_drive_probe_reset().
2163 *
2164 * NOTE!!! Do not use this function as probeinit if a low level
2165 * driver implements only hardreset. Just pass NULL as probeinit
2166 * in that case. Using this function is probably okay but doing
2167 * so makes reset sequence different from the original
2168 * ->phy_reset implementation and Jeff nervous. :-P
2169 */
2170 void ata_std_probeinit(struct ata_port *ap)
2171 {
2172 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2173 sata_phy_resume(ap);
2174 if (sata_dev_present(ap))
2175 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2176 }
2177 }
2178
2179 /**
2180 * ata_std_softreset - reset host port via ATA SRST
2181 * @ap: port to reset
2182 * @verbose: fail verbosely
2183 * @classes: resulting classes of attached devices
2184 *
2185 * Reset host port using ATA SRST. This function is to be used
2186 * as standard callback for ata_drive_*_reset() functions.
2187 *
2188 * LOCKING:
2189 * Kernel thread context (may sleep)
2190 *
2191 * RETURNS:
2192 * 0 on success, -errno otherwise.
2193 */
2194 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2195 {
2196 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2197 unsigned int devmask = 0, err_mask;
2198 u8 err;
2199
2200 DPRINTK("ENTER\n");
2201
2202 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2203 classes[0] = ATA_DEV_NONE;
2204 goto out;
2205 }
2206
2207 /* determine if device 0/1 are present */
2208 if (ata_devchk(ap, 0))
2209 devmask |= (1 << 0);
2210 if (slave_possible && ata_devchk(ap, 1))
2211 devmask |= (1 << 1);
2212
2213 /* select device 0 again */
2214 ap->ops->dev_select(ap, 0);
2215
2216 /* issue bus reset */
2217 DPRINTK("about to softreset, devmask=%x\n", devmask);
2218 err_mask = ata_bus_softreset(ap, devmask);
2219 if (err_mask) {
2220 if (verbose)
2221 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2222 ap->id, err_mask);
2223 else
2224 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2225 err_mask);
2226 return -EIO;
2227 }
2228
2229 /* determine by signature whether we have ATA or ATAPI devices */
2230 classes[0] = ata_dev_try_classify(ap, 0, &err);
2231 if (slave_possible && err != 0x81)
2232 classes[1] = ata_dev_try_classify(ap, 1, &err);
2233
2234 out:
2235 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2236 return 0;
2237 }
2238
2239 /**
2240 * sata_std_hardreset - reset host port via SATA phy reset
2241 * @ap: port to reset
2242 * @verbose: fail verbosely
2243 * @class: resulting class of attached device
2244 *
2245 * SATA phy-reset host port using DET bits of SControl register.
2246 * This function is to be used as standard callback for
2247 * ata_drive_*_reset().
2248 *
2249 * LOCKING:
2250 * Kernel thread context (may sleep)
2251 *
2252 * RETURNS:
2253 * 0 on success, -errno otherwise.
2254 */
2255 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2256 {
2257 DPRINTK("ENTER\n");
2258
2259 /* Issue phy wake/reset */
2260 scr_write_flush(ap, SCR_CONTROL, 0x301);
2261
2262 /*
2263 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2264 * 10.4.2 says at least 1 ms.
2265 */
2266 msleep(1);
2267
2268 /* Bring phy back */
2269 sata_phy_resume(ap);
2270
2271 /* TODO: phy layer with polling, timeouts, etc. */
2272 if (!sata_dev_present(ap)) {
2273 *class = ATA_DEV_NONE;
2274 DPRINTK("EXIT, link offline\n");
2275 return 0;
2276 }
2277
2278 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2279 if (verbose)
2280 printk(KERN_ERR "ata%u: COMRESET failed "
2281 "(device not ready)\n", ap->id);
2282 else
2283 DPRINTK("EXIT, device not ready\n");
2284 return -EIO;
2285 }
2286
2287 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2288
2289 *class = ata_dev_try_classify(ap, 0, NULL);
2290
2291 DPRINTK("EXIT, class=%u\n", *class);
2292 return 0;
2293 }
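
/*
 * SControl values used above, for reference: bits 3:0 form the DET
 * field, so 0x301 (DET = 1) drives COMRESET on the wire and holds the
 * interface in reset, while sata_phy_resume() writes 0x300 (DET = 0)
 * to release it and let the phy renegotiate.  The 0x300 component of
 * both values sets the IPM field (bits 11:8) to 3, disabling
 * partial/slumber power-management transitions.
 */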
2294
2295 /**
2296 * ata_std_postreset - standard postreset callback
2297 * @ap: the target ata_port
2298 * @classes: classes of attached devices
2299 *
2300 * This function is invoked after a successful reset. Note that
2301 * the device might have been reset more than once using
2302 * different reset methods before postreset is invoked.
2303 *
2304 * This function is to be used as standard callback for
2305 * ata_drive_*_reset().
2306 *
2307 * LOCKING:
2308 * Kernel thread context (may sleep)
2309 */
2310 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2311 {
2312 DPRINTK("ENTER\n");
2313
2314 /* set cable type if it isn't already set */
2315 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2316 ap->cbl = ATA_CBL_SATA;
2317
2318 /* print link status */
2319 if (ap->cbl == ATA_CBL_SATA)
2320 sata_print_link_status(ap);
2321
2322 /* re-enable interrupts */
2323 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2324 ata_irq_on(ap);
2325
2326 /* is double-select really necessary? */
2327 if (classes[1] != ATA_DEV_NONE)
2328 ap->ops->dev_select(ap, 1);
2329 if (classes[0] != ATA_DEV_NONE)
2330 ap->ops->dev_select(ap, 0);
2331
2332 /* bail out if no device is present */
2333 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2334 DPRINTK("EXIT, no device\n");
2335 return;
2336 }
2337
2338 /* set up device control */
2339 if (ap->ioaddr.ctl_addr) {
2340 if (ap->flags & ATA_FLAG_MMIO)
2341 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2342 else
2343 outb(ap->ctl, ap->ioaddr.ctl_addr);
2344 }
2345
2346 DPRINTK("EXIT\n");
2347 }
2348
2349 /**
2350 * ata_std_probe_reset - standard probe reset method
2351 * @ap: port to perform probe-reset on
2352 * @classes: resulting classes of attached devices
2353 *
2354 * The stock off-the-shelf ->probe_reset method.
2355 *
2356 * LOCKING:
2357 * Kernel thread context (may sleep)
2358 *
2359 * RETURNS:
2360 * 0 on success, -errno otherwise.
2361 */
2362 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2363 {
2364 ata_reset_fn_t hardreset;
2365
2366 hardreset = NULL;
2367 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2368 hardreset = sata_std_hardreset;
2369
2370 return ata_drive_probe_reset(ap, ata_std_probeinit,
2371 ata_std_softreset, hardreset,
2372 ata_std_postreset, classes);
2373 }
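
/*
 * Illustrative sketch only (the mydrv_* names are hypothetical): a
 * LLD that needs a custom postreset hook but can otherwise use the
 * stock reset methods would build its ->probe_reset on top of
 * ata_drive_probe_reset(), just like the stock version above:
 *
 *	static int mydrv_probe_reset(struct ata_port *ap,
 *				     unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset,
 *					     sata_std_hardreset,
 *					     mydrv_postreset, classes);
 *	}
 */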
2374
2375 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2376 ata_postreset_fn_t postreset,
2377 unsigned int *classes)
2378 {
2379 int i, rc;
2380
2381 for (i = 0; i < ATA_MAX_DEVICES; i++)
2382 classes[i] = ATA_DEV_UNKNOWN;
2383
2384 rc = reset(ap, 0, classes);
2385 if (rc)
2386 return rc;
2387
2388 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2389  * complete and convert all remaining ATA_DEV_UNKNOWN entries
2390  * to ATA_DEV_NONE.
2391 */
2392 for (i = 0; i < ATA_MAX_DEVICES; i++)
2393 if (classes[i] != ATA_DEV_UNKNOWN)
2394 break;
2395
2396 if (i < ATA_MAX_DEVICES)
2397 for (i = 0; i < ATA_MAX_DEVICES; i++)
2398 if (classes[i] == ATA_DEV_UNKNOWN)
2399 classes[i] = ATA_DEV_NONE;
2400
2401 if (postreset)
2402 postreset(ap, classes);
2403
2404 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2405 }
2406
2407 /**
2408 * ata_drive_probe_reset - Perform probe reset with given methods
2409 * @ap: port to reset
2410 * @probeinit: probeinit method (can be NULL)
2411 * @softreset: softreset method (can be NULL)
2412 * @hardreset: hardreset method (can be NULL)
2413 * @postreset: postreset method (can be NULL)
2414 * @classes: resulting classes of attached devices
2415 *
2416 * Reset the specified port and classify attached devices using
2417 * given methods. This function prefers softreset but tries all
2418 * possible reset sequences to reset and classify devices. This
2419 * function is intended to be used by low-level drivers when
2420 * constructing their ->probe_reset callback.
2421 *
2422 * Reset methods should follow the following rules.
2423 *
2424 * - Return 0 on success, -errno on failure.
2425 * - If classification is supported, fill classes[] with
2426 * recognized class codes.
2427 * - If classification is not supported, leave classes[] alone.
2428 * - If verbose is non-zero, print error message on failure;
2429 * otherwise, shut up.
2430 *
2431 * LOCKING:
2432 * Kernel thread context (may sleep)
2433 *
2434 * RETURNS:
2435 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2436 * if classification fails, and any error code from reset
2437 * methods.
2438 */
2439 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2440 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2441 ata_postreset_fn_t postreset, unsigned int *classes)
2442 {
2443 int rc = -EINVAL;
2444
2445 if (probeinit)
2446 probeinit(ap);
2447
2448 if (softreset) {
2449 rc = do_probe_reset(ap, softreset, postreset, classes);
2450 if (rc == 0)
2451 return 0;
2452 }
2453
2454 if (!hardreset)
2455 return rc;
2456
2457 rc = do_probe_reset(ap, hardreset, postreset, classes);
2458 if (rc != -ENODEV)
2459 return rc;
2460
2461 if (softreset)
2462 rc = do_probe_reset(ap, softreset, postreset, classes);
2463
2464 return rc;
2465 }
2466
2467 /**
2468 * ata_dev_same_device - Determine whether new ID matches configured device
2469 * @ap: port on which the device to compare against resides
2470 * @dev: device to compare against
2471 * @new_class: class of the new device
2472 * @new_id: IDENTIFY page of the new device
2473 *
2474 * Compare @new_class and @new_id against @dev and determine
2475 * whether @dev is the device indicated by @new_class and
2476 * @new_id.
2477 *
2478 * LOCKING:
2479 * None.
2480 *
2481 * RETURNS:
2482 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2483 */
2484 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2485 unsigned int new_class, const u16 *new_id)
2486 {
2487 const u16 *old_id = dev->id;
2488 unsigned char model[2][41], serial[2][21];
2489 u64 new_n_sectors;
2490
2491 if (dev->class != new_class) {
2492 printk(KERN_INFO
2493 "ata%u: dev %u class mismatch %d != %d\n",
2494 ap->id, dev->devno, dev->class, new_class);
2495 return 0;
2496 }
2497
2498 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2499 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2500 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2501 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2502 new_n_sectors = ata_id_n_sectors(new_id);
2503
2504 if (strcmp(model[0], model[1])) {
2505 printk(KERN_INFO
2506 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2507 ap->id, dev->devno, model[0], model[1]);
2508 return 0;
2509 }
2510
2511 if (strcmp(serial[0], serial[1])) {
2512 printk(KERN_INFO
2513 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2514 ap->id, dev->devno, serial[0], serial[1]);
2515 return 0;
2516 }
2517
2518 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2519 printk(KERN_INFO
2520 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2521 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2522 (unsigned long long)new_n_sectors);
2523 return 0;
2524 }
2525
2526 return 1;
2527 }
2528
2529 /**
2530 * ata_dev_revalidate - Revalidate ATA device
2531 * @ap: port on which the device to revalidate resides
2532 * @dev: device to revalidate
2533 * @post_reset: is this revalidation after reset?
2534 *
2535 * Re-read IDENTIFY page and make sure @dev is still attached to
2536 * the port.
2537 *
2538 * LOCKING:
2539 * Kernel thread context (may sleep)
2540 *
2541 * RETURNS:
2542 * 0 on success, negative errno otherwise
2543 */
2544 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2545 int post_reset)
2546 {
2547 unsigned int class;
2548 u16 *id;
2549 int rc;
2550
2551 if (!ata_dev_present(dev))
2552 return -ENODEV;
2553
2554 class = dev->class;
2555 id = NULL;
2556
2557 /* allocate & read ID data */
2558 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2559 if (rc)
2560 goto fail;
2561
2562 /* is the device still there? */
2563 if (!ata_dev_same_device(ap, dev, class, id)) {
2564 rc = -ENODEV;
2565 goto fail;
2566 }
2567
2568 kfree(dev->id);
2569 dev->id = id;
2570
2571 /* configure device according to the new ID */
2572 return ata_dev_configure(ap, dev, 0);
2573
2574 fail:
2575 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2576 ap->id, dev->devno, rc);
2577 kfree(id);
2578 return rc;
2579 }
2580
2581 static const char * const ata_dma_blacklist [] = {
2582 "WDC AC11000H", NULL,
2583 "WDC AC22100H", NULL,
2584 "WDC AC32500H", NULL,
2585 "WDC AC33100H", NULL,
2586 "WDC AC31600H", NULL,
2587 "WDC AC32100H", "24.09P07",
2588 "WDC AC23200L", "21.10N21",
2589 "Compaq CRD-8241B", NULL,
2590 "CRD-8400B", NULL,
2591 "CRD-8480B", NULL,
2592 "CRD-8482B", NULL,
2593 "CRD-84", NULL,
2594 "SanDisk SDP3B", NULL,
2595 "SanDisk SDP3B-64", NULL,
2596 "SANYO CD-ROM CRD", NULL,
2597 "HITACHI CDR-8", NULL,
2598 "HITACHI CDR-8335", NULL,
2599 "HITACHI CDR-8435", NULL,
2600 "Toshiba CD-ROM XM-6202B", NULL,
2601 "TOSHIBA CD-ROM XM-1702BC", NULL,
2602 "CD-532E-A", NULL,
2603 "E-IDE CD-ROM CR-840", NULL,
2604 "CD-ROM Drive/F5A", NULL,
2605 "WPI CDD-820", NULL,
2606 "SAMSUNG CD-ROM SC-148C", NULL,
2607 "SAMSUNG CD-ROM SC", NULL,
2608 "SanDisk SDP3B-64", NULL,
2609 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2610 "_NEC DV5800A", NULL,
2611 "SAMSUNG CD-ROM SN-124", "N001"
2612 };
2613
2614 static int ata_strim(char *s, size_t len)
2615 {
2616 len = strnlen(s, len);
2617
2618 /* ATA/ATAPI specifies that IDENTIFY strings are blank-padded; remove trailing blanks */
2619 while ((len > 0) && (s[len - 1] == ' ')) {
2620 len--;
2621 s[len] = 0;
2622 }
2623 return len;
2624 }
2625
2626 static int ata_dma_blacklisted(const struct ata_device *dev)
2627 {
2628 unsigned char model_num[40];
2629 unsigned char model_rev[16];
2630 unsigned int nlen, rlen;
2631 int i;
2632
2633 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2634 sizeof(model_num));
2635 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2636 sizeof(model_rev));
2637 nlen = ata_strim(model_num, sizeof(model_num));
2638 rlen = ata_strim(model_rev, sizeof(model_rev));
2639
2640 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2641 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2642 if (ata_dma_blacklist[i+1] == NULL)
2643 return 1;
2644 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2645 return 1;
2646 }
2647 }
2648 return 0;
2649 }
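
/*
 * Note on the table above: entries come in { model, firmware-rev }
 * pairs, which is why the loop steps by two.  A NULL revision
 * blacklists every firmware revision of that model; a hypothetical
 * entry limited to a single bad firmware release would look like:
 *
 *	"FOO DRIVE 1234",	"1.23",
 */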
2650
2651 /**
2652 * ata_dev_xfermask - Compute supported xfermask of the given device
2653 * @ap: Port on which the device to compute xfermask for resides
2654 * @dev: Device to compute xfermask for
2655 *
2656 * Compute supported xfermask of @dev. This function is
2657 * responsible for applying all known limits including host
2658 * controller limits, device blacklist, etc...
2659 *
2660 * LOCKING:
2661 * None.
2662 *
2663 * RETURNS:
2664 * Computed xfermask.
2665 */
2666 static unsigned int ata_dev_xfermask(struct ata_port *ap,
2667 struct ata_device *dev)
2668 {
2669 unsigned long xfer_mask;
2670 int i;
2671
2672 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2673 ap->udma_mask);
2674
2675 /* use port-wide xfermask for now */
2676 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2677 struct ata_device *d = &ap->device[i];
2678 if (!ata_dev_present(d))
2679 continue;
2680 xfer_mask &= ata_id_xfermask(d->id);
2681 if (ata_dma_blacklisted(d))
2682 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2683 }
2684
2685 if (ata_dma_blacklisted(dev))
2686 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2687 "disabling DMA\n", ap->id, dev->devno);
2688
2689 return xfer_mask;
2690 }
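
/*
 * For reference, the packed mask used above keeps the three transfer
 * mode families in disjoint bit ranges.  A sketch of the convention
 * (see ata_pack_xfermask() and ata_id_xfermask() for the real
 * helpers):
 *
 *	xfer_mask = (pio_mask << ATA_SHIFT_PIO) |
 *		    (mwdma_mask << ATA_SHIFT_MWDMA) |
 *		    (udma_mask << ATA_SHIFT_UDMA);
 *
 * which is what allows a single AND against ATA_MASK_MWDMA |
 * ATA_MASK_UDMA to strip all DMA modes for blacklisted devices.
 */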
2691
2692 /**
2693 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2694 * @ap: Port associated with device @dev
2695 * @dev: Device to which command will be sent
2696 *
2697 * Issue SET FEATURES - XFER MODE command to device @dev
2698 * on port @ap.
2699 *
2700 * LOCKING:
2701 * PCI/etc. bus probe sem.
2702 */
2703
2704 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2705 {
2706 struct ata_taskfile tf;
2707
2708 /* set up set-features taskfile */
2709 DPRINTK("set features - xfer mode\n");
2710
2711 ata_tf_init(ap, &tf, dev->devno);
2712 tf.command = ATA_CMD_SET_FEATURES;
2713 tf.feature = SETFEATURES_XFER;
2714 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2715 tf.protocol = ATA_PROT_NODATA;
2716 tf.nsect = dev->xfer_mode;
2717
2718 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2719 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2720 ap->id);
2721 ata_port_disable(ap);
2722 }
2723
2724 DPRINTK("EXIT\n");
2725 }
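
/*
 * Note: the value programmed into the sector count register above is
 * the XFER_* mode code itself, e.g. XFER_PIO_4 (0x0c) or XFER_UDMA_5
 * (0x45), as defined for the SET FEATURES - XFER MODE subcommand.
 */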
2726
2727 /**
2728 * ata_dev_init_params - Issue INIT DEV PARAMS command
2729 * @ap: Port associated with device @dev
2730 * @dev: Device to which command will be sent
2731 *
2732 * LOCKING:
2733 * Kernel thread context (may sleep)
2734 *
2735 * RETURNS:
2736 * 0 on success, AC_ERR_* mask otherwise.
2737 */
2738
2739 static unsigned int ata_dev_init_params(struct ata_port *ap,
2740 struct ata_device *dev)
2741 {
2742 struct ata_taskfile tf;
2743 unsigned int err_mask;
2744 u16 sectors = dev->id[6];
2745 u16 heads = dev->id[3];
2746
2747 /* Number of sectors per track 1-255. Number of heads 1-16 */
2748 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2749 return 0;
2750
2751 /* set up init dev params taskfile */
2752 DPRINTK("init dev params \n");
2753
2754 ata_tf_init(ap, &tf, dev->devno);
2755 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2756 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2757 tf.protocol = ATA_PROT_NODATA;
2758 tf.nsect = sectors;
2759 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2760
2761 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2762
2763 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2764 return err_mask;
2765 }
2766
2767 /**
2768 * ata_sg_clean - Unmap DMA memory associated with command
2769 * @qc: Command containing DMA memory to be released
2770 *
2771 * Unmap all mapped DMA memory associated with this command.
2772 *
2773 * LOCKING:
2774 * spin_lock_irqsave(host_set lock)
2775 */
2776
2777 static void ata_sg_clean(struct ata_queued_cmd *qc)
2778 {
2779 struct ata_port *ap = qc->ap;
2780 struct scatterlist *sg = qc->__sg;
2781 int dir = qc->dma_dir;
2782 void *pad_buf = NULL;
2783
2784 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2785 WARN_ON(sg == NULL);
2786
2787 if (qc->flags & ATA_QCFLAG_SINGLE)
2788 WARN_ON(qc->n_elem > 1);
2789
2790 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2791
2792 /* if we padded the buffer out to a 32-bit boundary, and the data
2793 * xfer direction is from-device, we must copy from the
2794 * pad buffer back into the supplied buffer
2795 */
2796 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2797 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2798
2799 if (qc->flags & ATA_QCFLAG_SG) {
2800 if (qc->n_elem)
2801 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2802 /* restore last sg */
2803 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2804 if (pad_buf) {
2805 struct scatterlist *psg = &qc->pad_sgent;
2806 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2807 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2808 kunmap_atomic(addr, KM_IRQ0);
2809 }
2810 } else {
2811 if (qc->n_elem)
2812 dma_unmap_single(ap->host_set->dev,
2813 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2814 dir);
2815 /* restore sg */
2816 sg->length += qc->pad_len;
2817 if (pad_buf)
2818 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2819 pad_buf, qc->pad_len);
2820 }
2821
2822 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2823 qc->__sg = NULL;
2824 }
2825
2826 /**
2827 * ata_fill_sg - Fill PCI IDE PRD table
2828 * @qc: Metadata associated with taskfile to be transferred
2829 *
2830 * Fill PCI IDE PRD (scatter-gather) table with segments
2831 * associated with the current disk command.
2832 *
2833 * LOCKING:
2834 * spin_lock_irqsave(host_set lock)
2835 *
2836 */
2837 static void ata_fill_sg(struct ata_queued_cmd *qc)
2838 {
2839 struct ata_port *ap = qc->ap;
2840 struct scatterlist *sg;
2841 unsigned int idx;
2842
2843 WARN_ON(qc->__sg == NULL);
2844 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2845
2846 idx = 0;
2847 ata_for_each_sg(sg, qc) {
2848 u32 addr, offset;
2849 u32 sg_len, len;
2850
2851 /* determine if physical DMA addr spans 64K boundary.
2852 * Note h/w doesn't support 64-bit, so we unconditionally
2853 * truncate dma_addr_t to u32.
2854 */
2855 addr = (u32) sg_dma_address(sg);
2856 sg_len = sg_dma_len(sg);
2857
2858 while (sg_len) {
2859 offset = addr & 0xffff;
2860 len = sg_len;
2861 if ((offset + sg_len) > 0x10000)
2862 len = 0x10000 - offset;
2863
2864 ap->prd[idx].addr = cpu_to_le32(addr);
2865 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2866 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2867
2868 idx++;
2869 sg_len -= len;
2870 addr += len;
2871 }
2872 }
2873
2874 if (idx)
2875 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2876 }
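
/*
 * PRD entry layout produced above, per the SFF-8038i bus master IDE
 * spec: each entry is two little-endian 32-bit words -- a physical
 * base address, then flags_len with the byte count in bits 15:0
 * (where 0 means 64K) and the end-of-table flag (ATA_PRD_EOT) in
 * bit 31.  Hence the 64K-boundary splitting in the loop above.
 */
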
2877 /**
2878 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2879 * @qc: Metadata associated with taskfile to check
2880 *
2881 * Allow low-level driver to filter ATA PACKET commands, returning
2882 * a status indicating whether or not it is OK to use DMA for the
2883 * supplied PACKET command.
2884 *
2885 * LOCKING:
2886 * spin_lock_irqsave(host_set lock)
2887 *
2888 * RETURNS: 0 when ATAPI DMA can be used
2889 * nonzero otherwise
2890 */
2891 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2892 {
2893 struct ata_port *ap = qc->ap;
2894 int rc = 0; /* Assume ATAPI DMA is OK by default */
2895
2896 if (ap->ops->check_atapi_dma)
2897 rc = ap->ops->check_atapi_dma(qc);
2898
2899 return rc;
2900 }
2901 /**
2902 * ata_qc_prep - Prepare taskfile for submission
2903 * @qc: Metadata associated with taskfile to be prepared
2904 *
2905 * Prepare ATA taskfile for submission.
2906 *
2907 * LOCKING:
2908 * spin_lock_irqsave(host_set lock)
2909 */
2910 void ata_qc_prep(struct ata_queued_cmd *qc)
2911 {
2912 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2913 return;
2914
2915 ata_fill_sg(qc);
2916 }
2917
2918 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2919
2920 /**
2921 * ata_sg_init_one - Associate command with memory buffer
2922 * @qc: Command to be associated
2923 * @buf: Memory buffer
2924 * @buflen: Length of memory buffer, in bytes.
2925 *
2926 * Initialize the data-related elements of queued_cmd @qc
2927 * to point to a single memory buffer, @buf of byte length @buflen.
2928 *
2929 * LOCKING:
2930 * spin_lock_irqsave(host_set lock)
2931 */
2932
2933 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2934 {
2935 struct scatterlist *sg;
2936
2937 qc->flags |= ATA_QCFLAG_SINGLE;
2938
2939 memset(&qc->sgent, 0, sizeof(qc->sgent));
2940 qc->__sg = &qc->sgent;
2941 qc->n_elem = 1;
2942 qc->orig_n_elem = 1;
2943 qc->buf_virt = buf;
2944
2945 sg = qc->__sg;
2946 sg_init_one(sg, buf, buflen);
2947 }
2948
2949 /**
2950 * ata_sg_init - Associate command with scatter-gather table.
2951 * @qc: Command to be associated
2952 * @sg: Scatter-gather table.
2953 * @n_elem: Number of elements in s/g table.
2954 *
2955 * Initialize the data-related elements of queued_cmd @qc
2956 * to point to a scatter-gather table @sg, containing @n_elem
2957 * elements.
2958 *
2959 * LOCKING:
2960 * spin_lock_irqsave(host_set lock)
2961 */
2962
2963 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2964 unsigned int n_elem)
2965 {
2966 qc->flags |= ATA_QCFLAG_SG;
2967 qc->__sg = sg;
2968 qc->n_elem = n_elem;
2969 qc->orig_n_elem = n_elem;
2970 }
2971
2972 /**
2973 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2974 * @qc: Command with memory buffer to be mapped.
2975 *
2976 * DMA-map the memory buffer associated with queued_cmd @qc.
2977 *
2978 * LOCKING:
2979 * spin_lock_irqsave(host_set lock)
2980 *
2981 * RETURNS:
2982 * Zero on success, negative on error.
2983 */
2984
2985 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2986 {
2987 struct ata_port *ap = qc->ap;
2988 int dir = qc->dma_dir;
2989 struct scatterlist *sg = qc->__sg;
2990 dma_addr_t dma_address;
2991 int trim_sg = 0;
2992
2993 /* we must lengthen transfers to end on a 32-bit boundary */
2994 qc->pad_len = sg->length & 3;
2995 if (qc->pad_len) {
2996 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2997 struct scatterlist *psg = &qc->pad_sgent;
2998
2999 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3000
3001 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3002
3003 if (qc->tf.flags & ATA_TFLAG_WRITE)
3004 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3005 qc->pad_len);
3006
3007 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3008 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3009 /* trim sg */
3010 sg->length -= qc->pad_len;
3011 if (sg->length == 0)
3012 trim_sg = 1;
3013
3014 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3015 sg->length, qc->pad_len);
3016 }
3017
3018 if (trim_sg) {
3019 qc->n_elem--;
3020 goto skip_map;
3021 }
3022
3023 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3024 sg->length, dir);
3025 if (dma_mapping_error(dma_address)) {
3026 /* restore sg */
3027 sg->length += qc->pad_len;
3028 return -1;
3029 }
3030
3031 sg_dma_address(sg) = dma_address;
3032 sg_dma_len(sg) = sg->length;
3033
3034 skip_map:
3035 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3036 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3037
3038 return 0;
3039 }
3040
3041 /**
3042 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3043 * @qc: Command with scatter-gather table to be mapped.
3044 *
3045 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3046 *
3047 * LOCKING:
3048 * spin_lock_irqsave(host_set lock)
3049 *
3050 * RETURNS:
3051 * Zero on success, negative on error.
3052 *
3053 */
3054
3055 static int ata_sg_setup(struct ata_queued_cmd *qc)
3056 {
3057 struct ata_port *ap = qc->ap;
3058 struct scatterlist *sg = qc->__sg;
3059 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3060 int n_elem, pre_n_elem, dir, trim_sg = 0;
3061
3062 VPRINTK("ENTER, ata%u\n", ap->id);
3063 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3064
3065 /* we must lengthen transfers to end on a 32-bit boundary */
3066 qc->pad_len = lsg->length & 3;
3067 if (qc->pad_len) {
3068 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3069 struct scatterlist *psg = &qc->pad_sgent;
3070 unsigned int offset;
3071
3072 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3073
3074 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3075
3076 /*
3077 * psg->page/offset are used to copy to-be-written
3078 * data in this function or read data in ata_sg_clean.
3079 */
3080 offset = lsg->offset + lsg->length - qc->pad_len;
3081 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3082 psg->offset = offset_in_page(offset);
3083
3084 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3085 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3086 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3087 kunmap_atomic(addr, KM_IRQ0);
3088 }
3089
3090 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3091 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3092 /* trim last sg */
3093 lsg->length -= qc->pad_len;
3094 if (lsg->length == 0)
3095 trim_sg = 1;
3096
3097 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3098 qc->n_elem - 1, lsg->length, qc->pad_len);
3099 }
3100
3101 pre_n_elem = qc->n_elem;
3102 if (trim_sg && pre_n_elem)
3103 pre_n_elem--;
3104
3105 if (!pre_n_elem) {
3106 n_elem = 0;
3107 goto skip_map;
3108 }
3109
3110 dir = qc->dma_dir;
3111 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3112 if (n_elem < 1) {
3113 /* restore last sg */
3114 lsg->length += qc->pad_len;
3115 return -1;
3116 }
3117
3118 DPRINTK("%d sg elements mapped\n", n_elem);
3119
3120 skip_map:
3121 qc->n_elem = n_elem;
3122
3123 return 0;
3124 }
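
/*
 * Why the padding in the two setup routines above: ATAPI allows
 * transfer lengths that are not multiples of four, but the DMA engine
 * works in 32-bit quantities, so the unaligned tail of a request is
 * bounced through a per-tag ATA_DMA_PAD_SZ scratch buffer (ap->pad).
 * ATA data commands are always sector-sized, hence the
 * WARN_ON(qc->dev->class != ATA_DEV_ATAPI) checks.
 */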
3125
3126 /**
3127 * ata_poll_qc_complete - turn irq back on and finish qc
3128 * @qc: Command to complete
3130 *
3131 * LOCKING:
3132 * None. (grabs host lock)
3133 */
3134
3135 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3136 {
3137 struct ata_port *ap = qc->ap;
3138 unsigned long flags;
3139
3140 spin_lock_irqsave(&ap->host_set->lock, flags);
3141 ap->flags &= ~ATA_FLAG_NOINTR;
3142 ata_irq_on(ap);
3143 ata_qc_complete(qc);
3144 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3145 }
3146
3147 /**
3148 * ata_pio_poll - poll using PIO, depending on current state
3149 * @ap: the target ata_port
3150 *
3151 * LOCKING:
3152 * None. (executing in kernel thread context)
3153 *
3154 * RETURNS:
3155 * timeout value to use
3156 */
3157
3158 static unsigned long ata_pio_poll(struct ata_port *ap)
3159 {
3160 struct ata_queued_cmd *qc;
3161 u8 status;
3162 unsigned int poll_state = HSM_ST_UNKNOWN;
3163 unsigned int reg_state = HSM_ST_UNKNOWN;
3164
3165 qc = ata_qc_from_tag(ap, ap->active_tag);
3166 WARN_ON(qc == NULL);
3167
3168 switch (ap->hsm_task_state) {
3169 case HSM_ST:
3170 case HSM_ST_POLL:
3171 poll_state = HSM_ST_POLL;
3172 reg_state = HSM_ST;
3173 break;
3174 case HSM_ST_LAST:
3175 case HSM_ST_LAST_POLL:
3176 poll_state = HSM_ST_LAST_POLL;
3177 reg_state = HSM_ST_LAST;
3178 break;
3179 default:
3180 BUG();
3181 break;
3182 }
3183
3184 status = ata_chk_status(ap);
3185 if (status & ATA_BUSY) {
3186 if (time_after(jiffies, ap->pio_task_timeout)) {
3187 qc->err_mask |= AC_ERR_TIMEOUT;
3188 ap->hsm_task_state = HSM_ST_TMOUT;
3189 return 0;
3190 }
3191 ap->hsm_task_state = poll_state;
3192 return ATA_SHORT_PAUSE;
3193 }
3194
3195 ap->hsm_task_state = reg_state;
3196 return 0;
3197 }
3198
3199 /**
3200 * ata_pio_complete - check if drive is busy or idle
3201 * @ap: the target ata_port
3202 *
3203 * LOCKING:
3204 * None. (executing in kernel thread context)
3205 *
3206 * RETURNS:
3207 * Non-zero if qc completed, zero otherwise.
3208 */
3209
3210 static int ata_pio_complete (struct ata_port *ap)
3211 {
3212 struct ata_queued_cmd *qc;
3213 u8 drv_stat;
3214
3215 /*
3216 * This is purely heuristic. This is a fast path. Sometimes when
3217 * we enter, BSY will be cleared in a chk-status or two. If not,
3218 * the drive is probably seeking or something. Snooze for a couple
3219 * msecs, then chk-status again. If still busy, fall back to
3220 * HSM_ST_POLL state.
3221 */
3222 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3223 if (drv_stat & ATA_BUSY) {
3224 msleep(2);
3225 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3226 if (drv_stat & ATA_BUSY) {
3227 ap->hsm_task_state = HSM_ST_LAST_POLL;
3228 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3229 return 0;
3230 }
3231 }
3232
3233 qc = ata_qc_from_tag(ap, ap->active_tag);
3234 WARN_ON(qc == NULL);
3235
3236 drv_stat = ata_wait_idle(ap);
3237 if (!ata_ok(drv_stat)) {
3238 qc->err_mask |= __ac_err_mask(drv_stat);
3239 ap->hsm_task_state = HSM_ST_ERR;
3240 return 0;
3241 }
3242
3243 ap->hsm_task_state = HSM_ST_IDLE;
3244
3245 WARN_ON(qc->err_mask);
3246 ata_poll_qc_complete(qc);
3247
3248 /* another command may start at this point */
3249
3250 return 1;
3251 }
3252
3253
3254 /**
3255 * swap_buf_le16 - swap halves of 16-bit words in place
3256 * @buf: Buffer to swap
3257 * @buf_words: Number of 16-bit words in buffer.
3258 *
3259 * Swap halves of 16-bit words if needed to convert from
3260 * little-endian byte order to native cpu byte order, or
3261 * vice-versa.
3262 *
3263 * LOCKING:
3264 * Inherited from caller.
3265 */
3266 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3267 {
3268 #ifdef __BIG_ENDIAN
3269 unsigned int i;
3270
3271 for (i = 0; i < buf_words; i++)
3272 buf[i] = le16_to_cpu(buf[i]);
3273 #endif /* __BIG_ENDIAN */
3274 }
3275
3276 /**
3277 * ata_mmio_data_xfer - Transfer data by MMIO
3278 * @ap: port to read/write
3279 * @buf: data buffer
3280 * @buflen: buffer length
3281 * @write_data: read/write
3282 *
3283 * Transfer data from/to the device data register by MMIO.
3284 *
3285 * LOCKING:
3286 * Inherited from caller.
3287 */
3288
3289 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3290 unsigned int buflen, int write_data)
3291 {
3292 unsigned int i;
3293 unsigned int words = buflen >> 1;
3294 u16 *buf16 = (u16 *) buf;
3295 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3296
3297 /* Transfer multiple of 2 bytes */
3298 if (write_data) {
3299 for (i = 0; i < words; i++)
3300 writew(le16_to_cpu(buf16[i]), mmio);
3301 } else {
3302 for (i = 0; i < words; i++)
3303 buf16[i] = cpu_to_le16(readw(mmio));
3304 }
3305
3306 /* Transfer trailing 1 byte, if any. */
3307 if (unlikely(buflen & 0x01)) {
3308 u16 align_buf[1] = { 0 };
3309 unsigned char *trailing_buf = buf + buflen - 1;
3310
3311 if (write_data) {
3312 memcpy(align_buf, trailing_buf, 1);
3313 writew(le16_to_cpu(align_buf[0]), mmio);
3314 } else {
3315 align_buf[0] = cpu_to_le16(readw(mmio));
3316 memcpy(trailing_buf, align_buf, 1);
3317 }
3318 }
3319 }
3320
3321 /**
3322 * ata_pio_data_xfer - Transfer data by PIO
3323 * @ap: port to read/write
3324 * @buf: data buffer
3325 * @buflen: buffer length
3326 * @write_data: read/write
3327 *
3328 * Transfer data from/to the device data register by PIO.
3329 *
3330 * LOCKING:
3331 * Inherited from caller.
3332 */
3333
3334 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3335 unsigned int buflen, int write_data)
3336 {
3337 unsigned int words = buflen >> 1;
3338
3339 /* Transfer multiple of 2 bytes */
3340 if (write_data)
3341 outsw(ap->ioaddr.data_addr, buf, words);
3342 else
3343 insw(ap->ioaddr.data_addr, buf, words);
3344
3345 /* Transfer trailing 1 byte, if any. */
3346 if (unlikely(buflen & 0x01)) {
3347 u16 align_buf[1] = { 0 };
3348 unsigned char *trailing_buf = buf + buflen - 1;
3349
3350 if (write_data) {
3351 memcpy(align_buf, trailing_buf, 1);
3352 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3353 } else {
3354 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3355 memcpy(trailing_buf, align_buf, 1);
3356 }
3357 }
3358 }
3359
3360 /**
3361 * ata_data_xfer - Transfer data from/to the data register.
3362 * @ap: port to read/write
3363 * @buf: data buffer
3364 * @buflen: buffer length
3365 * @do_write: read/write
3366 *
3367 * Transfer data from/to the device data register.
3368 *
3369 * LOCKING:
3370 * Inherited from caller.
3371 */
3372
3373 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3374 unsigned int buflen, int do_write)
3375 {
3376 /* Make the crap hardware pay the costs not the good stuff */
3377 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3378 unsigned long flags;
3379 local_irq_save(flags);
3380 if (ap->flags & ATA_FLAG_MMIO)
3381 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3382 else
3383 ata_pio_data_xfer(ap, buf, buflen, do_write);
3384 local_irq_restore(flags);
3385 } else {
3386 if (ap->flags & ATA_FLAG_MMIO)
3387 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3388 else
3389 ata_pio_data_xfer(ap, buf, buflen, do_write);
3390 }
3391 }
3392
3393 /**
3394 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3395 * @qc: Command in progress
3396 *
3397 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3398 *
3399 * LOCKING:
3400 * Inherited from caller.
3401 */
3402
3403 static void ata_pio_sector(struct ata_queued_cmd *qc)
3404 {
3405 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3406 struct scatterlist *sg = qc->__sg;
3407 struct ata_port *ap = qc->ap;
3408 struct page *page;
3409 unsigned int offset;
3410 unsigned char *buf;
3411
3412 if (qc->cursect == (qc->nsect - 1))
3413 ap->hsm_task_state = HSM_ST_LAST;
3414
3415 page = sg[qc->cursg].page;
3416 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3417
3418 /* get the current page and offset */
3419 page = nth_page(page, (offset >> PAGE_SHIFT));
3420 offset %= PAGE_SIZE;
3421
3422 buf = kmap(page) + offset;
3423
3424 qc->cursect++;
3425 qc->cursg_ofs++;
3426
3427 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3428 qc->cursg++;
3429 qc->cursg_ofs = 0;
3430 }
3431
3432 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3433
3434 /* do the actual data transfer */
3436 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3437
3438 kunmap(page);
3439 }
3440
3441 /**
3442 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3443 * @qc: Command in progress
3444 * @bytes: number of bytes
3445 *
3446 * Transfer data from/to the ATAPI device.
3447 *
3448 * LOCKING:
3449 * Inherited from caller.
3450 *
3451 */
3452
3453 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3454 {
3455 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3456 struct scatterlist *sg = qc->__sg;
3457 struct ata_port *ap = qc->ap;
3458 struct page *page;
3459 unsigned char *buf;
3460 unsigned int offset, count;
3461
3462 if (qc->curbytes + bytes >= qc->nbytes)
3463 ap->hsm_task_state = HSM_ST_LAST;
3464
3465 next_sg:
3466 if (unlikely(qc->cursg >= qc->n_elem)) {
3467 /*
3468 * The end of qc->sg is reached and the device expects
3469 * more data to transfer. In order not to overrun qc->sg
3470 * and fulfill length specified in the byte count register,
3471 * - for read case, discard trailing data from the device
3472 * - for write case, padding zero data to the device
3473 */
3474 u16 pad_buf[1] = { 0 };
3475 unsigned int words = bytes >> 1;
3476 unsigned int i;
3477
3478 if (words) /* warning if bytes > 1 */
3479 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3480 ap->id, bytes);
3481
3482 for (i = 0; i < words; i++)
3483 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3484
3485 ap->hsm_task_state = HSM_ST_LAST;
3486 return;
3487 }
3488
3489 sg = &qc->__sg[qc->cursg];
3490
3491 page = sg->page;
3492 offset = sg->offset + qc->cursg_ofs;
3493
3494 /* get the current page and offset */
3495 page = nth_page(page, (offset >> PAGE_SHIFT));
3496 offset %= PAGE_SIZE;
3497
3498 /* don't overrun current sg */
3499 count = min(sg->length - qc->cursg_ofs, bytes);
3500
3501 /* don't cross page boundaries */
3502 count = min(count, (unsigned int)PAGE_SIZE - offset);
3503
3504 buf = kmap(page) + offset;
3505
3506 bytes -= count;
3507 qc->curbytes += count;
3508 qc->cursg_ofs += count;
3509
3510 if (qc->cursg_ofs == sg->length) {
3511 qc->cursg++;
3512 qc->cursg_ofs = 0;
3513 }
3514
3515 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3516
3517 /* do the actual data transfer */
3518 ata_data_xfer(ap, buf, count, do_write);
3519
3520 kunmap(page);
3521
3522 if (bytes)
3523 goto next_sg;
3524 }
3525
3526 /**
3527 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3528 * @qc: Command in progress
3529 *
3530 * Transfer data from/to the ATAPI device.
3531 *
3532 * LOCKING:
3533 * Inherited from caller.
3534 */
3535
3536 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3537 {
3538 struct ata_port *ap = qc->ap;
3539 struct ata_device *dev = qc->dev;
3540 unsigned int ireason, bc_lo, bc_hi, bytes;
3541 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3542
3543 ap->ops->tf_read(ap, &qc->tf);
3544 ireason = qc->tf.nsect;
3545 bc_lo = qc->tf.lbam;
3546 bc_hi = qc->tf.lbah;
3547 bytes = (bc_hi << 8) | bc_lo;
3548
3549 /* shall be cleared to zero, indicating xfer of data */
3550 if (ireason & (1 << 0))
3551 goto err_out;
3552
3553 /* make sure transfer direction matches expected */
3554 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3555 if (do_write != i_write)
3556 goto err_out;
3557
3558 __atapi_pio_bytes(qc, bytes);
3559
3560 return;
3561
3562 err_out:
3563 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3564 ap->id, dev->devno);
3565 qc->err_mask |= AC_ERR_HSM;
3566 ap->hsm_task_state = HSM_ST_ERR;
3567 }
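
/*
 * ireason decoding used above, from the ATAPI PACKET protocol: bit 0
 * is CoD (1 == command packet expected, 0 == data) and bit 1 is I/O
 * (1 == transfer to host).  For a data phase, CoD must therefore be
 * clear and I/O must be the inverse of do_write, which is exactly
 * what the two checks enforce.
 */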
3568
3569 /**
3570 * ata_pio_block - start PIO on a block
3571 * @ap: the target ata_port
3572 *
3573 * LOCKING:
3574 * None. (executing in kernel thread context)
3575 */
3576
3577 static void ata_pio_block(struct ata_port *ap)
3578 {
3579 struct ata_queued_cmd *qc;
3580 u8 status;
3581
3582 /*
3583 * This is purely heuristic. This is a fast path.
3584 * Sometimes when we enter, BSY will be cleared in
3585 * a chk-status or two. If not, the drive is probably seeking
3586 * or something. Snooze for a couple msecs, then
3587 * chk-status again. If still busy, fall back to
3588 * HSM_ST_POLL state.
3589 */
3590 status = ata_busy_wait(ap, ATA_BUSY, 5);
3591 if (status & ATA_BUSY) {
3592 msleep(2);
3593 status = ata_busy_wait(ap, ATA_BUSY, 10);
3594 if (status & ATA_BUSY) {
3595 ap->hsm_task_state = HSM_ST_POLL;
3596 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3597 return;
3598 }
3599 }
3600
3601 qc = ata_qc_from_tag(ap, ap->active_tag);
3602 WARN_ON(qc == NULL);
3603
3604 /* check error */
3605 if (status & (ATA_ERR | ATA_DF)) {
3606 qc->err_mask |= AC_ERR_DEV;
3607 ap->hsm_task_state = HSM_ST_ERR;
3608 return;
3609 }
3610
3611 /* transfer data if any */
3612 if (is_atapi_taskfile(&qc->tf)) {
3613 /* DRQ=0 means no more data to transfer */
3614 if ((status & ATA_DRQ) == 0) {
3615 ap->hsm_task_state = HSM_ST_LAST;
3616 return;
3617 }
3618
3619 atapi_pio_bytes(qc);
3620 } else {
3621 /* handle BSY=0, DRQ=0 as error */
3622 if ((status & ATA_DRQ) == 0) {
3623 qc->err_mask |= AC_ERR_HSM;
3624 ap->hsm_task_state = HSM_ST_ERR;
3625 return;
3626 }
3627
3628 ata_pio_sector(qc);
3629 }
3630 }
3631
3632 static void ata_pio_error(struct ata_port *ap)
3633 {
3634 struct ata_queued_cmd *qc;
3635
3636 qc = ata_qc_from_tag(ap, ap->active_tag);
3637 WARN_ON(qc == NULL);
3638
3639 if (qc->tf.command != ATA_CMD_PACKET)
3640 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3641
3642 /* make sure qc->err_mask is available to
3643 * know what's wrong and recover
3644 */
3645 WARN_ON(qc->err_mask == 0);
3646
3647 ap->hsm_task_state = HSM_ST_IDLE;
3648
3649 ata_poll_qc_complete(qc);
3650 }
3651
3652 static void ata_pio_task(void *_data)
3653 {
3654 struct ata_port *ap = _data;
3655 unsigned long timeout;
3656 int qc_completed;
3657
3658 fsm_start:
3659 timeout = 0;
3660 qc_completed = 0;
3661
3662 switch (ap->hsm_task_state) {
3663 case HSM_ST_IDLE:
3664 return;
3665
3666 case HSM_ST:
3667 ata_pio_block(ap);
3668 break;
3669
3670 case HSM_ST_LAST:
3671 qc_completed = ata_pio_complete(ap);
3672 break;
3673
3674 case HSM_ST_POLL:
3675 case HSM_ST_LAST_POLL:
3676 timeout = ata_pio_poll(ap);
3677 break;
3678
3679 case HSM_ST_TMOUT:
3680 case HSM_ST_ERR:
3681 ata_pio_error(ap);
3682 return;
3683 }
3684
3685 if (timeout)
3686 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3687 else if (!qc_completed)
3688 goto fsm_start;
3689 }
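
/*
 * Summary of the polling state machine driven above: HSM_ST transfers
 * a data block via ata_pio_block(); HSM_ST_LAST waits for command
 * completion in ata_pio_complete(); the *_POLL variants mean the
 * drive was still BSY, so the task is requeued after the pause
 * returned by ata_pio_poll(); HSM_ST_TMOUT and HSM_ST_ERR fail the
 * command through ata_pio_error(); HSM_ST_IDLE means nothing is in
 * flight.
 */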
3690
3691 /**
3692 * atapi_packet_task - Write CDB bytes to hardware
3693 * @_data: Port to which ATAPI device is attached.
3694 *
3695 * When device has indicated its readiness to accept
3696 * a CDB, this function is called. Send the CDB.
3697 * If DMA is to be performed, exit immediately.
3698 * Otherwise, we are in polling mode, so poll
3699 * status until the operation succeeds or fails.
3700 *
3701 * LOCKING:
3702 * Kernel thread context (may sleep)
3703 */
3704
3705 static void atapi_packet_task(void *_data)
3706 {
3707 struct ata_port *ap = _data;
3708 struct ata_queued_cmd *qc;
3709 u8 status;
3710
3711 qc = ata_qc_from_tag(ap, ap->active_tag);
3712 WARN_ON(qc == NULL);
3713 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3714
3715 /* sleep-wait for BSY to clear */
3716 DPRINTK("busy wait\n");
3717 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3718 qc->err_mask |= AC_ERR_TIMEOUT;
3719 goto err_out;
3720 }
3721
3722 /* make sure DRQ is set */
3723 status = ata_chk_status(ap);
3724 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3725 qc->err_mask |= AC_ERR_HSM;
3726 goto err_out;
3727 }
3728
3729 /* send SCSI cdb */
3730 DPRINTK("send cdb\n");
3731 WARN_ON(qc->dev->cdb_len < 12);
3732
3733 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3734 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3735 unsigned long flags;
3736
3737 /* Once we're done issuing command and kicking bmdma,
3738 * irq handler takes over. To not lose irq, we need
3739 * to clear NOINTR flag before sending cdb, but
3740 * interrupt handler shouldn't be invoked before we're
3741 * finished. Hence, the following locking.
3742 */
3743 spin_lock_irqsave(&ap->host_set->lock, flags);
3744 ap->flags &= ~ATA_FLAG_NOINTR;
3745 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3746 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3747 ap->ops->bmdma_start(qc); /* initiate bmdma */
3748 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3749 } else {
3750 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3751
3752 /* PIO commands are handled by polling */
3753 ap->hsm_task_state = HSM_ST;
3754 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3755 }
3756
3757 return;
3758
3759 err_out:
3760 ata_poll_qc_complete(qc);
3761 }
3762
3763 /**
3764 * ata_qc_timeout - Handle timeout of queued command
3765 * @qc: Command that timed out
3766 *
3767 * Some part of the kernel (currently, only the SCSI layer)
3768 * has noticed that the active command on the port has not
3769 * completed after a specified length of time. Handle this
3770 * condition by disabling DMA (if necessary) and completing
3771 * transactions, with error if necessary.
3772 *
3773 * This also handles the case of the "lost interrupt", where
3774 * for some reason (possibly hardware bug, possibly driver bug)
3775 * an interrupt was not delivered to the driver, even though the
3776 * transaction completed successfully.
3777 *
3778 * LOCKING:
3779 * Inherited from SCSI layer (none, can sleep)
3780 */
3781
3782 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3783 {
3784 struct ata_port *ap = qc->ap;
3785 struct ata_host_set *host_set = ap->host_set;
3786 u8 host_stat = 0, drv_stat;
3787 unsigned long flags;
3788
3789 DPRINTK("ENTER\n");
3790
3791 ap->hsm_task_state = HSM_ST_IDLE;
3792
3793 spin_lock_irqsave(&host_set->lock, flags);
3794
3795 switch (qc->tf.protocol) {
3796
3797 case ATA_PROT_DMA:
3798 case ATA_PROT_ATAPI_DMA:
3799 host_stat = ap->ops->bmdma_status(ap);
3800
3801 /* before we do anything else, clear DMA-Start bit */
3802 ap->ops->bmdma_stop(qc);
3803
3804 /* fall through */
3805
3806 default:
3807 ata_altstatus(ap);
3808 drv_stat = ata_chk_status(ap);
3809
3810 /* ack bmdma irq events */
3811 ap->ops->irq_clear(ap);
3812
3813 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3814 ap->id, qc->tf.command, drv_stat, host_stat);
3815
3816 /* complete taskfile transaction */
3817 qc->err_mask |= ac_err_mask(drv_stat);
3818 break;
3819 }
3820
3821 spin_unlock_irqrestore(&host_set->lock, flags);
3822
3823 ata_eh_qc_complete(qc);
3824
3825 DPRINTK("EXIT\n");
3826 }
3827
3828 /**
3829 * ata_eng_timeout - Handle timeout of queued command
3830 * @ap: Port on which timed-out command is active
3831 *
3832 * Some part of the kernel (currently, only the SCSI layer)
3833 * has noticed that the active command on port @ap has not
3834 * completed after a specified length of time. Handle this
3835 * condition by disabling DMA (if necessary) and completing
3836 * transactions, with error if necessary.
3837 *
3838 * This also handles the case of the "lost interrupt", where
3839 * for some reason (possibly hardware bug, possibly driver bug)
3840 * an interrupt was not delivered to the driver, even though the
3841 * transaction completed successfully.
3842 *
3843 * LOCKING:
3844 * Inherited from SCSI layer (none, can sleep)
3845 */
3846
3847 void ata_eng_timeout(struct ata_port *ap)
3848 {
3849 DPRINTK("ENTER\n");
3850
3851 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3852
3853 DPRINTK("EXIT\n");
3854 }
3855
3856 /**
3857 * ata_qc_new - Request an available ATA command, for queueing
3858 * @ap: Port from which to allocate an available command structure
3860 *
3861 * LOCKING:
3862 * None.
3863 */
3864
3865 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3866 {
3867 struct ata_queued_cmd *qc = NULL;
3868 unsigned int i;
3869
3870 for (i = 0; i < ATA_MAX_QUEUE; i++)
3871 if (!test_and_set_bit(i, &ap->qactive)) {
3872 qc = ata_qc_from_tag(ap, i);
3873 break;
3874 }
3875
3876 if (qc)
3877 qc->tag = i;
3878
3879 return qc;
3880 }
3881
3882 /**
3883 * ata_qc_new_init - Request an available ATA command, and initialize it
3884 * @ap: Port associated with device @dev
3885 * @dev: Device for which we request an available command structure
3886 *
3887 * LOCKING:
3888 * None.
3889 */
3890
3891 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3892 struct ata_device *dev)
3893 {
3894 struct ata_queued_cmd *qc;
3895
3896 qc = ata_qc_new(ap);
3897 if (qc) {
3898 qc->scsicmd = NULL;
3899 qc->ap = ap;
3900 qc->dev = dev;
3901
3902 ata_qc_reinit(qc);
3903 }
3904
3905 return qc;
3906 }
3907
3908 /**
3909 * ata_qc_free - free unused ata_queued_cmd
3910 * @qc: Command to complete
3911 *
3912 * Designed to free unused ata_queued_cmd object
3913 * in case something prevents using it.
3914 *
3915 * LOCKING:
3916 * spin_lock_irqsave(host_set lock)
3917 */
3918 void ata_qc_free(struct ata_queued_cmd *qc)
3919 {
3920 struct ata_port *ap = qc->ap;
3921 unsigned int tag;
3922
3923 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3924
3925 qc->flags = 0;
3926 tag = qc->tag;
3927 if (likely(ata_tag_valid(tag))) {
3928 if (tag == ap->active_tag)
3929 ap->active_tag = ATA_TAG_POISON;
3930 qc->tag = ATA_TAG_POISON;
3931 clear_bit(tag, &ap->qactive);
3932 }
3933 }
3934
3935 void __ata_qc_complete(struct ata_queued_cmd *qc)
3936 {
3937 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3938 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3939
3940 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3941 ata_sg_clean(qc);
3942
3943 /* atapi: mark qc as inactive to prevent the interrupt handler
3944 * from completing the command twice later, before the error handler
3945 * is called. (when rc != 0 and atapi request sense is needed)
3946 */
3947 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3948
3949 /* call completion callback */
3950 qc->complete_fn(qc);
3951 }
3952
3953 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3954 {
3955 struct ata_port *ap = qc->ap;
3956
3957 switch (qc->tf.protocol) {
3958 case ATA_PROT_DMA:
3959 case ATA_PROT_ATAPI_DMA:
3960 return 1;
3961
3962 case ATA_PROT_ATAPI:
3963 case ATA_PROT_PIO:
3964 if (ap->flags & ATA_FLAG_PIO_DMA)
3965 return 1;
3966
3967 /* fall through */
3968
3969 default:
3970 return 0;
3971 }
3972
3973 /* never reached */
3974 }
3975
3976 /**
3977 * ata_qc_issue - issue taskfile to device
3978 * @qc: command to issue to device
3979 *
3980 * Prepare an ATA command for submission to the device.
3981 * This includes mapping the data into a DMA-able
3982 * area, filling in the S/G table, and finally
3983 * writing the taskfile to hardware, starting the command.
3984 *
3985 * LOCKING:
3986 * spin_lock_irqsave(host_set lock)
3987 *
3988 * RETURNS:
3989 * Zero on success, AC_ERR_* mask on failure
3990 */
3991
3992 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3993 {
3994 struct ata_port *ap = qc->ap;
3995
3996 if (ata_should_dma_map(qc)) {
3997 if (qc->flags & ATA_QCFLAG_SG) {
3998 if (ata_sg_setup(qc))
3999 goto sg_err;
4000 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4001 if (ata_sg_setup_one(qc))
4002 goto sg_err;
4003 }
4004 } else {
4005 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4006 }
4007
4008 ap->ops->qc_prep(qc);
4009
4010 qc->ap->active_tag = qc->tag;
4011 qc->flags |= ATA_QCFLAG_ACTIVE;
4012
4013 return ap->ops->qc_issue(qc);
4014
4015 sg_err:
4016 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4017 return AC_ERR_SYSTEM;
4018 }
4019
4020
4021 /**
4022 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4023 * @qc: command to issue to device
4024 *
4025 * Using various libata functions and hooks, this function
4026 * starts an ATA command. ATA commands are grouped into
4027 * classes called "protocols", and issuing each type of protocol
4028 * is slightly different.
4029 *
4030 * May be used as the qc_issue() entry in ata_port_operations.
4031 *
4032 * LOCKING:
4033 * spin_lock_irqsave(host_set lock)
4034 *
4035 * RETURNS:
4036 * Zero on success, AC_ERR_* mask on failure
4037 */
4038
4039 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4040 {
4041 struct ata_port *ap = qc->ap;
4042
4043 ata_dev_select(ap, qc->dev->devno, 1, 0);
4044
4045 switch (qc->tf.protocol) {
4046 case ATA_PROT_NODATA:
4047 ata_tf_to_host(ap, &qc->tf);
4048 break;
4049
4050 case ATA_PROT_DMA:
4051 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4052 ap->ops->bmdma_setup(qc); /* set up bmdma */
4053 ap->ops->bmdma_start(qc); /* initiate bmdma */
4054 break;
4055
4056 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4057 ata_qc_set_polling(qc);
4058 ata_tf_to_host(ap, &qc->tf);
4059 ap->hsm_task_state = HSM_ST;
4060 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4061 break;
4062
4063 case ATA_PROT_ATAPI:
4064 ata_qc_set_polling(qc);
4065 ata_tf_to_host(ap, &qc->tf);
4066 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4067 break;
4068
4069 case ATA_PROT_ATAPI_NODATA:
4070 ap->flags |= ATA_FLAG_NOINTR;
4071 ata_tf_to_host(ap, &qc->tf);
4072 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4073 break;
4074
4075 case ATA_PROT_ATAPI_DMA:
4076 ap->flags |= ATA_FLAG_NOINTR;
4077 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4078 ap->ops->bmdma_setup(qc); /* set up bmdma */
4079 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4080 break;
4081
4082 default:
4083 WARN_ON(1);
4084 return AC_ERR_SYSTEM;
4085 }
4086
4087 return 0;
4088 }
4089
4090 /**
4091 * ata_host_intr - Handle host interrupt for given (port, task)
4092 * @ap: Port on which interrupt arrived (possibly...)
4093 * @qc: Taskfile currently active in engine
4094 *
4095 * Handle host interrupt for given queued command. Currently,
4096 * only DMA interrupts are handled. All other commands are
4097 * handled via polling with interrupts disabled (nIEN bit).
4098 *
4099 * LOCKING:
4100 * spin_lock_irqsave(host_set lock)
4101 *
4102 * RETURNS:
4103 * One if interrupt was handled, zero if not (shared irq).
4104 */
4105
4106 inline unsigned int ata_host_intr (struct ata_port *ap,
4107 struct ata_queued_cmd *qc)
4108 {
4109 u8 status, host_stat;
4110
4111 switch (qc->tf.protocol) {
4112
4113 case ATA_PROT_DMA:
4114 case ATA_PROT_ATAPI_DMA:
4115 case ATA_PROT_ATAPI:
4116 /* check status of DMA engine */
4117 host_stat = ap->ops->bmdma_status(ap);
4118 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4119
4120 /* if it's not our irq... */
4121 if (!(host_stat & ATA_DMA_INTR))
4122 goto idle_irq;
4123
4124 /* before we do anything else, clear DMA-Start bit */
4125 ap->ops->bmdma_stop(qc);
4126
4127 /* fall through */
4128
4129 case ATA_PROT_ATAPI_NODATA:
4130 case ATA_PROT_NODATA:
4131 /* check altstatus */
4132 status = ata_altstatus(ap);
4133 if (status & ATA_BUSY)
4134 goto idle_irq;
4135
4136 /* check main status, clearing INTRQ */
4137 status = ata_chk_status(ap);
4138 if (unlikely(status & ATA_BUSY))
4139 goto idle_irq;
4140 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4141 ap->id, qc->tf.protocol, status);
4142
4143 /* ack bmdma irq events */
4144 ap->ops->irq_clear(ap);
4145
4146 /* complete taskfile transaction */
4147 qc->err_mask |= ac_err_mask(status);
4148 ata_qc_complete(qc);
4149 break;
4150
4151 default:
4152 goto idle_irq;
4153 }
4154
4155 return 1; /* irq handled */
4156
4157 idle_irq:
4158 ap->stats.idle_irq++;
4159
4160 #ifdef ATA_IRQ_TRAP
4161 if ((ap->stats.idle_irq % 1000) == 0) {
4162 ata_irq_ack(ap, 0); /* debug trap */
4163 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4164 return 1;
4165 }
4166 #endif
4167 return 0; /* irq not handled */
4168 }
4169
4170 /**
4171 * ata_interrupt - Default ATA host interrupt handler
4172 * @irq: irq line (unused)
4173 * @dev_instance: pointer to our ata_host_set information structure
4174 * @regs: unused
4175 *
4176 * Default interrupt handler for PCI IDE devices. Calls
4177 * ata_host_intr() for each port that is not disabled.
4178 *
4179 * LOCKING:
4180 * Obtains host_set lock during operation.
4181 *
4182 * RETURNS:
4183 * IRQ_NONE or IRQ_HANDLED.
4184 */
4185
4186 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4187 {
4188 struct ata_host_set *host_set = dev_instance;
4189 unsigned int i;
4190 unsigned int handled = 0;
4191 unsigned long flags;
4192
4193 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4194 spin_lock_irqsave(&host_set->lock, flags);
4195
4196 for (i = 0; i < host_set->n_ports; i++) {
4197 struct ata_port *ap;
4198
4199 ap = host_set->ports[i];
4200 if (ap &&
4201 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4202 struct ata_queued_cmd *qc;
4203
4204 qc = ata_qc_from_tag(ap, ap->active_tag);
4205 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4206 (qc->flags & ATA_QCFLAG_ACTIVE))
4207 handled |= ata_host_intr(ap, qc);
4208 }
4209 }
4210
4211 spin_unlock_irqrestore(&host_set->lock, flags);
4212
4213 return IRQ_RETVAL(handled);
4214 }
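
/*
 * Editor's sketch, hypothetical driver: ata_interrupt() is normally
 * installed as the irq_handler hook of ata_port_operations;
 * ata_device_add() below then hands it to request_irq() with the
 * driver's irq_flags (typically SA_SHIRQ for shared PCI interrupts).
 */
#if 0
static const struct ata_port_operations example_intr_ops = {
        /* ... */
        .irq_handler    = ata_interrupt,
        .irq_clear      = ata_bmdma_irq_clear,
};
#endif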
4215
4216
4217 /*
4218 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4219 * without filling in any other registers.
4220 */
4221 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4222 u8 cmd)
4223 {
4224 struct ata_taskfile tf;
4225 int err;
4226
4227 ata_tf_init(ap, &tf, dev->devno);
4228
4229 tf.command = cmd;
4230 tf.flags |= ATA_TFLAG_DEVICE;
4231 tf.protocol = ATA_PROT_NODATA;
4232
4233 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4234 if (err)
4235 printk(KERN_ERR "%s: ata command failed: %d\n",
4236 __FUNCTION__, err);
4237
4238 return err;
4239 }
4240
4241 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4242 {
4243 u8 cmd;
4244
4245 if (!ata_try_flush_cache(dev))
4246 return 0;
4247
4248 if (ata_id_has_flush_ext(dev->id))
4249 cmd = ATA_CMD_FLUSH_EXT;
4250 else
4251 cmd = ATA_CMD_FLUSH;
4252
4253 return ata_do_simple_cmd(ap, dev, cmd);
4254 }
4255
4256 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4257 {
4258 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4259 }
4260
4261 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4262 {
4263 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4264 }
4265
4266 /**
4267 * ata_device_resume - wake up a previously suspended device
4268 * @ap: port the device is connected to
4269 * @dev: the device to resume
4270 *
4271 * Kick the drive back into action by sending it an idle immediate
4272 * command and making sure its transfer mode matches between drive
4273 * and host.
4274 *
4275 */
4276 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4277 {
4278 if (ap->flags & ATA_FLAG_SUSPENDED) {
4279 ap->flags &= ~ATA_FLAG_SUSPENDED;
4280 ata_set_mode(ap);
4281 }
4282 if (!ata_dev_present(dev))
4283 return 0;
4284 if (dev->class == ATA_DEV_ATA)
4285 ata_start_drive(ap, dev);
4286
4287 return 0;
4288 }
4289
4290 /**
4291 * ata_device_suspend - prepare a device for suspend
4292 * @ap: port the device is connected to
4293 * @dev: the device to suspend
4294 *
4295 * Flush the cache on the drive, if appropriate, then issue a
4296 * standbynow command.
4297 */
4298 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4299 {
4300 if (!ata_dev_present(dev))
4301 return 0;
4302 if (dev->class == ATA_DEV_ATA)
4303 ata_flush_cache(ap, dev);
4304
4305 ata_standby_drive(ap, dev);
4306 ap->flags |= ATA_FLAG_SUSPENDED;
4307 return 0;
4308 }
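
/*
 * Editor's sketch of the intended pairing, assuming a single ATA disk:
 * suspend flushes the write cache and spins the drive down, resume
 * re-applies the transfer mode and spins it back up.
 */
#if 0
        ata_device_suspend(ap, dev);    /* flush cache + STANDBY IMMEDIATE */
        /* ... system sleeps ... */
        ata_device_resume(ap, dev);     /* ata_set_mode() + IDLE IMMEDIATE */
#endif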
4309
4310 /**
4311 * ata_port_start - Set port up for DMA.
4312 * @ap: Port to initialize
4313 *
4314 * Called just after data structures for each port are
4315 * initialized. Allocates space for PRD table.
4316 *
4317 * May be used as the port_start() entry in ata_port_operations.
4318 *
4319 * LOCKING:
4320 * Inherited from caller.
4321 */
4322
4323 int ata_port_start (struct ata_port *ap)
4324 {
4325 struct device *dev = ap->host_set->dev;
4326 int rc;
4327
4328 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4329 if (!ap->prd)
4330 return -ENOMEM;
4331
4332 rc = ata_pad_alloc(ap, dev);
4333 if (rc) {
4334 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4335 return rc;
4336 }
4337
4338 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4339
4340 return 0;
4341 }
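
/*
 * Editor's sketch, hypothetical LLDD: drivers that need per-port private
 * state usually call ata_port_start() first and then hang their own
 * allocation off ap->private_data, undoing both on failure.
 */
#if 0
static int example_port_start(struct ata_port *ap)
{
        struct example_port_priv *pp;   /* hypothetical private struct */
        int rc;

        rc = ata_port_start(ap);        /* PRD table + padding buffer */
        if (rc)
                return rc;

        pp = kzalloc(sizeof(*pp), GFP_KERNEL);
        if (!pp) {
                ata_port_stop(ap);
                return -ENOMEM;
        }

        ap->private_data = pp;
        return 0;
}
#endif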
4342
4343
4344 /**
4345 * ata_port_stop - Undo ata_port_start()
4346 * @ap: Port to shut down
4347 *
4348 * Frees the PRD table.
4349 *
4350 * May be used as the port_stop() entry in ata_port_operations.
4351 *
4352 * LOCKING:
4353 * Inherited from caller.
4354 */
4355
4356 void ata_port_stop (struct ata_port *ap)
4357 {
4358 struct device *dev = ap->host_set->dev;
4359
4360 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4361 ata_pad_free(ap, dev);
4362 }
4363
4364 void ata_host_stop (struct ata_host_set *host_set)
4365 {
4366 if (host_set->mmio_base)
4367 iounmap(host_set->mmio_base);
4368 }
4369
4370
4371 /**
4372 * ata_host_remove - Unregister SCSI host structure with upper layers
4373 * @ap: Port to unregister
4374 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4375 *
4376 * LOCKING:
4377 * Inherited from caller.
4378 */
4379
4380 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4381 {
4382 struct Scsi_Host *sh = ap->host;
4383
4384 DPRINTK("ENTER\n");
4385
4386 if (do_unregister)
4387 scsi_remove_host(sh);
4388
4389 ap->ops->port_stop(ap);
4390 }
4391
4392 /**
4393 * ata_host_init - Initialize an ata_port structure
4394 * @ap: Structure to initialize
4395 * @host: associated SCSI mid-layer structure
4396 * @host_set: Collection of hosts to which @ap belongs
4397 * @ent: Probe information provided by low-level driver
4398 * @port_no: Port number associated with this ata_port
4399 *
4400 * Initialize a new ata_port structure, and its associated
4401 * scsi_host.
4402 *
4403 * LOCKING:
4404 * Inherited from caller.
4405 */
4406
4407 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4408 struct ata_host_set *host_set,
4409 const struct ata_probe_ent *ent, unsigned int port_no)
4410 {
4411 unsigned int i;
4412
4413 host->max_id = 16;
4414 host->max_lun = 1;
4415 host->max_channel = 1;
4416 host->unique_id = ata_unique_id++;
4417 host->max_cmd_len = 12;
4418
4419 ap->flags = ATA_FLAG_PORT_DISABLED;
4420 ap->id = host->unique_id;
4421 ap->host = host;
4422 ap->ctl = ATA_DEVCTL_OBS;
4423 ap->host_set = host_set;
4424 ap->port_no = port_no;
4425 ap->hard_port_no =
4426 ent->legacy_mode ? ent->hard_port_no : port_no;
4427 ap->pio_mask = ent->pio_mask;
4428 ap->mwdma_mask = ent->mwdma_mask;
4429 ap->udma_mask = ent->udma_mask;
4430 ap->flags |= ent->host_flags;
4431 ap->ops = ent->port_ops;
4432 ap->cbl = ATA_CBL_NONE;
4433 ap->active_tag = ATA_TAG_POISON;
4434 ap->last_ctl = 0xFF;
4435
4436 INIT_WORK(&ap->port_task, NULL, NULL);
4437 INIT_LIST_HEAD(&ap->eh_done_q);
4438
4439 for (i = 0; i < ATA_MAX_DEVICES; i++)
4440 ap->device[i].devno = i;
4441
4442 #ifdef ATA_IRQ_TRAP
4443 ap->stats.unhandled_irq = 1;
4444 ap->stats.idle_irq = 1;
4445 #endif
4446
4447 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4448 }
4449
4450 /**
4451 * ata_host_add - Attach low-level ATA driver to system
4452 * @ent: Information provided by low-level driver
4453 * @host_set: Collection of ports to which we add
4454 * @port_no: Port number associated with this host
4455 *
4456 * Attach low-level ATA driver to system.
4457 *
4458 * LOCKING:
4459 * PCI/etc. bus probe sem.
4460 *
4461 * RETURNS:
4462 * New ata_port on success, NULL on error.
4463 */
4464
4465 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4466 struct ata_host_set *host_set,
4467 unsigned int port_no)
4468 {
4469 struct Scsi_Host *host;
4470 struct ata_port *ap;
4471 int rc;
4472
4473 DPRINTK("ENTER\n");
4474 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4475 if (!host)
4476 return NULL;
4477
4478 host->transportt = &ata_scsi_transport_template;
4479
4480 ap = (struct ata_port *) &host->hostdata[0];
4481
4482 ata_host_init(ap, host, host_set, ent, port_no);
4483
4484 rc = ap->ops->port_start(ap);
4485 if (rc)
4486 goto err_out;
4487
4488 return ap;
4489
4490 err_out:
4491 scsi_host_put(host);
4492 return NULL;
4493 }
4494
4495 /**
4496 * ata_device_add - Register hardware device with ATA and SCSI layers
4497 * @ent: Probe information describing hardware device to be registered
4498 *
4499 * This function processes the information provided in the probe
4500 * information struct @ent, allocates the necessary ATA and SCSI
4501 * host information structures, initializes them, and registers
4502 * everything with requisite kernel subsystems.
4503 *
4504 * This function requests irqs, probes the ATA bus, and probes
4505 * the SCSI bus.
4506 *
4507 * LOCKING:
4508 * PCI/etc. bus probe sem.
4509 *
4510 * RETURNS:
4511 * Number of ports registered. Zero on error (no ports registered).
4512 */
4513
4514 int ata_device_add(const struct ata_probe_ent *ent)
4515 {
4516 unsigned int count = 0, i;
4517 struct device *dev = ent->dev;
4518 struct ata_host_set *host_set;
4519
4520 DPRINTK("ENTER\n");
4521 /* alloc a container for our list of ATA ports (buses) */
4522 host_set = kzalloc(sizeof(struct ata_host_set) +
4523 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4524 if (!host_set)
4525 return 0;
4526 spin_lock_init(&host_set->lock);
4527
4528 host_set->dev = dev;
4529 host_set->n_ports = ent->n_ports;
4530 host_set->irq = ent->irq;
4531 host_set->mmio_base = ent->mmio_base;
4532 host_set->private_data = ent->private_data;
4533 host_set->ops = ent->port_ops;
4534
4535 /* register each port bound to this device */
4536 for (i = 0; i < ent->n_ports; i++) {
4537 struct ata_port *ap;
4538 unsigned long xfer_mode_mask;
4539
4540 ap = ata_host_add(ent, host_set, i);
4541 if (!ap)
4542 goto err_out;
4543
4544 host_set->ports[i] = ap;
4545 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4546 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4547 (ap->pio_mask << ATA_SHIFT_PIO);
4548
4549 /* print per-port info to dmesg */
4550 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4551 "bmdma 0x%lX irq %lu\n",
4552 ap->id,
4553 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4554 ata_mode_string(xfer_mode_mask),
4555 ap->ioaddr.cmd_addr,
4556 ap->ioaddr.ctl_addr,
4557 ap->ioaddr.bmdma_addr,
4558 ent->irq);
4559
4560 ata_chk_status(ap);
4561 host_set->ops->irq_clear(ap);
4562 count++;
4563 }
4564
4565 if (!count)
4566 goto err_free_ret;
4567
4568 /* obtain irq, that is shared between channels */
4569 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4570 DRV_NAME, host_set))
4571 goto err_out;
4572
4573 /* perform each probe synchronously */
4574 DPRINTK("probe begin\n");
4575 for (i = 0; i < count; i++) {
4576 struct ata_port *ap;
4577 int rc;
4578
4579 ap = host_set->ports[i];
4580
4581 DPRINTK("ata%u: bus probe begin\n", ap->id);
4582 rc = ata_bus_probe(ap);
4583 DPRINTK("ata%u: bus probe end\n", ap->id);
4584
4585 if (rc) {
4586 /* FIXME: do something useful here?
4587 * Current libata behavior will
4588 * tear down everything when
4589 * the module is removed
4590 * or the h/w is unplugged.
4591 */
4592 }
4593
4594 rc = scsi_add_host(ap->host, dev);
4595 if (rc) {
4596 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4597 ap->id);
4598 /* FIXME: do something useful here */
4599 /* FIXME: handle unconditional calls to
4600 * scsi_scan_host and ata_host_remove, below,
4601 * at the very least
4602 */
4603 }
4604 }
4605
4606 /* probes are done, now scan each port's disk(s) */
4607 DPRINTK("host probe begin\n");
4608 for (i = 0; i < count; i++) {
4609 struct ata_port *ap = host_set->ports[i];
4610
4611 ata_scsi_scan_host(ap);
4612 }
4613
4614 dev_set_drvdata(dev, host_set);
4615
4616 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4617 return ent->n_ports; /* success */
4618
4619 err_out:
4620 for (i = 0; i < count; i++) {
4621 ata_host_remove(host_set->ports[i], 1);
4622 scsi_host_put(host_set->ports[i]->host);
4623 }
4624 err_free_ret:
4625 kfree(host_set);
4626 VPRINTK("EXIT, returning 0\n");
4627 return 0;
4628 }
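
/*
 * Editor's sketch, hypothetical PCI driver probe path: the low-level
 * driver fills a struct ata_probe_ent and hands it to ata_device_add();
 * the mask and ops values below are illustrative only.
 */
#if 0
        probe_ent->n_ports = 2;
        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;
        probe_ent->pio_mask = 0x1f;     /* PIO0-4 */
        probe_ent->mwdma_mask = 0x07;   /* MWDMA0-2 */
        probe_ent->udma_mask = 0x7f;    /* UDMA0-6 */
        probe_ent->port_ops = &example_ops;     /* hypothetical, see above */

        if (!ata_device_add(probe_ent))
                return -ENODEV;         /* zero ports were registered */
#endif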
4629
4630 /**
4631 * ata_host_set_remove - unregister and free an ATA host set
4632 * @host_set: ATA host set to unregister and free
4633 *
4634 * Unregister all objects associated with this host set. Free those
4635 * objects.
4636 *
4637 * LOCKING:
4638 * Inherited from calling layer (may sleep).
4639 */
4640
4641 void ata_host_set_remove(struct ata_host_set *host_set)
4642 {
4643 struct ata_port *ap;
4644 unsigned int i;
4645
4646 for (i = 0; i < host_set->n_ports; i++) {
4647 ap = host_set->ports[i];
4648 scsi_remove_host(ap->host);
4649 }
4650
4651 free_irq(host_set->irq, host_set);
4652
4653 for (i = 0; i < host_set->n_ports; i++) {
4654 ap = host_set->ports[i];
4655
4656 ata_scsi_release(ap->host);
4657
4658 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4659 struct ata_ioports *ioaddr = &ap->ioaddr;
4660
4661 if (ioaddr->cmd_addr == 0x1f0)
4662 release_region(0x1f0, 8);
4663 else if (ioaddr->cmd_addr == 0x170)
4664 release_region(0x170, 8);
4665 }
4666
4667 scsi_host_put(ap->host);
4668 }
4669
4670 if (host_set->ops->host_stop)
4671 host_set->ops->host_stop(host_set);
4672
4673 kfree(host_set);
4674 }
4675
4676 /**
4677 * ata_scsi_release - SCSI layer callback hook for host unload
4678 * @host: libata host to be unloaded
4679 *
4680 * Performs all duties necessary to shut down a libata port...
4681 * Disable the port and release its resources.
4682 *
4683 * LOCKING:
4684 * Inherited from SCSI layer.
4685 *
4686 * RETURNS:
4687 * One.
4688 */
4689
4690 int ata_scsi_release(struct Scsi_Host *host)
4691 {
4692 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4693 int i;
4694
4695 DPRINTK("ENTER\n");
4696
4697 ap->ops->port_disable(ap);
4698 ata_host_remove(ap, 0);
4699 for (i = 0; i < ATA_MAX_DEVICES; i++)
4700 kfree(ap->device[i].id);
4701
4702 DPRINTK("EXIT\n");
4703 return 1;
4704 }
4705
4706 /**
4707 * ata_std_ports - initialize ioaddr with standard port offsets.
4708 * @ioaddr: IO address structure to be initialized
4709 *
4710 * Utility function which initializes data_addr, error_addr,
4711 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4712 * device_addr, status_addr, and command_addr to standard offsets
4713 * relative to cmd_addr.
4714 *
4715 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4716 */
4717
4718 void ata_std_ports(struct ata_ioports *ioaddr)
4719 {
4720 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4721 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4722 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4723 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4724 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4725 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4726 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4727 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4728 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4729 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4730 }
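
/*
 * Editor's sketch: for a legacy-mode primary channel the two base
 * addresses are the classic 0x1f0/0x3f6 pair, and ata_std_ports()
 * derives the remaining taskfile registers from cmd_addr.
 */
#if 0
        ioaddr->cmd_addr = 0x1f0;
        ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
        ata_std_ports(ioaddr);
        /* bmdma_addr and scr_addr remain the driver's responsibility */
#endif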
4731
4732
4733 #ifdef CONFIG_PCI
4734
4735 void ata_pci_host_stop (struct ata_host_set *host_set)
4736 {
4737 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4738
4739 pci_iounmap(pdev, host_set->mmio_base);
4740 }
4741
4742 /**
4743 * ata_pci_remove_one - PCI layer callback for device removal
4744 * @pdev: PCI device that was removed
4745 *
4746 * PCI layer indicates to libata via this hook that
4747 * hot-unplug or module unload event has occurred.
4748 * Handle this by unregistering all objects associated
4749 * with this PCI device. Free those objects. Then finally
4750 * release PCI resources and disable device.
4751 *
4752 * LOCKING:
4753 * Inherited from PCI layer (may sleep).
4754 */
4755
4756 void ata_pci_remove_one (struct pci_dev *pdev)
4757 {
4758 struct device *dev = pci_dev_to_dev(pdev);
4759 struct ata_host_set *host_set = dev_get_drvdata(dev);
4760
4761 ata_host_set_remove(host_set);
4762 pci_release_regions(pdev);
4763 pci_disable_device(pdev);
4764 dev_set_drvdata(dev, NULL);
4765 }
4766
4767 /* move to PCI subsystem */
4768 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4769 {
4770 unsigned long tmp = 0;
4771
4772 switch (bits->width) {
4773 case 1: {
4774 u8 tmp8 = 0;
4775 pci_read_config_byte(pdev, bits->reg, &tmp8);
4776 tmp = tmp8;
4777 break;
4778 }
4779 case 2: {
4780 u16 tmp16 = 0;
4781 pci_read_config_word(pdev, bits->reg, &tmp16);
4782 tmp = tmp16;
4783 break;
4784 }
4785 case 4: {
4786 u32 tmp32 = 0;
4787 pci_read_config_dword(pdev, bits->reg, &tmp32);
4788 tmp = tmp32;
4789 break;
4790 }
4791
4792 default:
4793 return -EINVAL;
4794 }
4795
4796 tmp &= bits->mask;
4797
4798 return (tmp == bits->val) ? 1 : 0;
4799 }
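
/*
 * Editor's sketch: a caller describes the config-space bits of interest
 * in a struct pci_bits (reg, width in bytes, mask, expected value) and
 * tests them; the register and bit below are illustrative only.
 */
#if 0
        static const struct pci_bits example_enable_bits = {
                0x41, 1, 0x80, 0x80
        };

        if (!pci_test_config_bits(pdev, &example_enable_bits))
                return -ENOENT; /* channel disabled in config space */
#endif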
4800
4801 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4802 {
4803 pci_save_state(pdev);
4804 pci_disable_device(pdev);
4805 pci_set_power_state(pdev, PCI_D3hot);
4806 return 0;
4807 }
4808
4809 int ata_pci_device_resume(struct pci_dev *pdev)
4810 {
4811 pci_set_power_state(pdev, PCI_D0);
4812 pci_restore_state(pdev);
4813 pci_enable_device(pdev);
4814 pci_set_master(pdev);
4815 return 0;
4816 }
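
/*
 * Editor's sketch, hypothetical driver: controllers without extra
 * power-management work can plug these helpers straight into their
 * pci_driver; example_init_one and example_pci_tbl are assumed names.
 */
#if 0
static struct pci_driver example_pci_driver = {
        .name           = "example",
        .id_table       = example_pci_tbl,
        .probe          = example_init_one,     /* calls ata_pci_init_one() */
        .remove         = ata_pci_remove_one,
        .suspend        = ata_pci_device_suspend,
        .resume         = ata_pci_device_resume,
};
#endif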
4817 #endif /* CONFIG_PCI */
4818
4819
4820 static int __init ata_init(void)
4821 {
4822 ata_wq = create_workqueue("ata");
4823 if (!ata_wq)
4824 return -ENOMEM;
4825
4826 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4827 return 0;
4828 }
4829
4830 static void __exit ata_exit(void)
4831 {
4832 destroy_workqueue(ata_wq);
4833 }
4834
4835 module_init(ata_init);
4836 module_exit(ata_exit);
4837
4838 static unsigned long ratelimit_time;
4839 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4840
4841 int ata_ratelimit(void)
4842 {
4843 int rc;
4844 unsigned long flags;
4845
4846 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4847
4848 if (time_after(jiffies, ratelimit_time)) {
4849 rc = 1;
4850 ratelimit_time = jiffies + (HZ/5);
4851 } else
4852 rc = 0;
4853
4854 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4855
4856 return rc;
4857 }
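
/*
 * Editor's sketch: ata_ratelimit() opens a new window every HZ/5 jiffies
 * (shared by all callers), so noisy paths typically gate their printk
 * on it.
 */
#if 0
        if (ata_ratelimit())
                printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
#endif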
4858
4859 /*
4860 * libata is essentially a library of internal helper functions for
4861 * low-level ATA host controller drivers. As such, the API/ABI is
4862 * likely to change as new drivers are added and updated.
4863 * Do not depend on ABI/API stability.
4864 */
4865
4866 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4867 EXPORT_SYMBOL_GPL(ata_std_ports);
4868 EXPORT_SYMBOL_GPL(ata_device_add);
4869 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4870 EXPORT_SYMBOL_GPL(ata_sg_init);
4871 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4872 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4873 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4874 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4875 EXPORT_SYMBOL_GPL(ata_tf_load);
4876 EXPORT_SYMBOL_GPL(ata_tf_read);
4877 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4878 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4879 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4880 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4881 EXPORT_SYMBOL_GPL(ata_check_status);
4882 EXPORT_SYMBOL_GPL(ata_altstatus);
4883 EXPORT_SYMBOL_GPL(ata_exec_command);
4884 EXPORT_SYMBOL_GPL(ata_port_start);
4885 EXPORT_SYMBOL_GPL(ata_port_stop);
4886 EXPORT_SYMBOL_GPL(ata_host_stop);
4887 EXPORT_SYMBOL_GPL(ata_interrupt);
4888 EXPORT_SYMBOL_GPL(ata_qc_prep);
4889 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4890 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4891 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4892 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4893 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4894 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4895 EXPORT_SYMBOL_GPL(ata_port_probe);
4896 EXPORT_SYMBOL_GPL(sata_phy_reset);
4897 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4898 EXPORT_SYMBOL_GPL(ata_bus_reset);
4899 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4900 EXPORT_SYMBOL_GPL(ata_std_softreset);
4901 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4902 EXPORT_SYMBOL_GPL(ata_std_postreset);
4903 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4904 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4905 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4906 EXPORT_SYMBOL_GPL(ata_port_disable);
4907 EXPORT_SYMBOL_GPL(ata_ratelimit);
4908 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4909 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4910 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4911 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4912 EXPORT_SYMBOL_GPL(ata_scsi_error);
4913 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4914 EXPORT_SYMBOL_GPL(ata_scsi_release);
4915 EXPORT_SYMBOL_GPL(ata_host_intr);
4916 EXPORT_SYMBOL_GPL(ata_dev_classify);
4917 EXPORT_SYMBOL_GPL(ata_id_string);
4918 EXPORT_SYMBOL_GPL(ata_id_c_string);
4919 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4920 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4921 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4922
4923 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4924 EXPORT_SYMBOL_GPL(ata_timing_compute);
4925 EXPORT_SYMBOL_GPL(ata_timing_merge);
4926
4927 #ifdef CONFIG_PCI
4928 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4929 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4930 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4931 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4932 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4933 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4934 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4935 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4936 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4937 #endif /* CONFIG_PCI */
4938
4939 EXPORT_SYMBOL_GPL(ata_device_suspend);
4940 EXPORT_SYMBOL_GPL(ata_device_resume);
4941 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4942 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);