[PATCH] libata: kill trailing whitespace
drivers/scsi/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
68 struct ata_device *dev);
69 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
70
71 static unsigned int ata_unique_id = 1;
72 static struct workqueue_struct *ata_wq;
73
74 int atapi_enabled = 1;
75 module_param(atapi_enabled, int, 0444);
76 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
77
78 int libata_fua = 0;
79 module_param_named(fua, libata_fua, int, 0444);
80 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
81
82 MODULE_AUTHOR("Jeff Garzik");
83 MODULE_DESCRIPTION("Library module for ATA devices");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_VERSION);
86
87
88 /**
89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
90 * @tf: Taskfile to convert
91 * @fis: Buffer into which data will be output
92 * @pmp: Port multiplier port
93 *
94 * Converts a standard ATA taskfile to a Serial ATA
95 * FIS structure (Register - Host to Device).
96 *
97 * LOCKING:
98 * Inherited from caller.
99 */
100
101 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
102 {
103 fis[0] = 0x27; /* Register - Host to Device FIS */
104 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
105 bit 7 indicates Command FIS */
106 fis[2] = tf->command;
107 fis[3] = tf->feature;
108
109 fis[4] = tf->lbal;
110 fis[5] = tf->lbam;
111 fis[6] = tf->lbah;
112 fis[7] = tf->device;
113
114 fis[8] = tf->hob_lbal;
115 fis[9] = tf->hob_lbam;
116 fis[10] = tf->hob_lbah;
117 fis[11] = tf->hob_feature;
118
119 fis[12] = tf->nsect;
120 fis[13] = tf->hob_nsect;
121 fis[14] = 0;
122 fis[15] = tf->ctl;
123
124 fis[16] = 0;
125 fis[17] = 0;
126 fis[18] = 0;
127 fis[19] = 0;
128 }
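
/*
 * Illustrative sketch (not part of this file): a SATA LLDD typically
 * uses ata_tf_to_fis() to fill the command FIS area of its hardware
 * command table before issuing a command.  The port_priv/cmd_tbl
 * names below are hypothetical:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, fis, 0);	// pmp 0: no port multiplier
 *	memcpy(port_priv->cmd_tbl, fis, sizeof(fis));
 */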
129
130 /**
131 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
132 * @fis: Buffer from which data will be input
133 * @tf: Taskfile to output
134 *
135 * Converts a serial ATA FIS structure to a standard ATA taskfile.
136 *
137 * LOCKING:
138 * Inherited from caller.
139 */
140
141 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
142 {
143 tf->command = fis[2]; /* status */
144 tf->feature = fis[3]; /* error */
145
146 tf->lbal = fis[4];
147 tf->lbam = fis[5];
148 tf->lbah = fis[6];
149 tf->device = fis[7];
150
151 tf->hob_lbal = fis[8];
152 tf->hob_lbam = fis[9];
153 tf->hob_lbah = fis[10];
154
155 tf->nsect = fis[12];
156 tf->hob_nsect = fis[13];
157 }
158
159 static const u8 ata_rw_cmds[] = {
160 /* pio multi */
161 ATA_CMD_READ_MULTI,
162 ATA_CMD_WRITE_MULTI,
163 ATA_CMD_READ_MULTI_EXT,
164 ATA_CMD_WRITE_MULTI_EXT,
165 0,
166 0,
167 0,
168 ATA_CMD_WRITE_MULTI_FUA_EXT,
169 /* pio */
170 ATA_CMD_PIO_READ,
171 ATA_CMD_PIO_WRITE,
172 ATA_CMD_PIO_READ_EXT,
173 ATA_CMD_PIO_WRITE_EXT,
174 0,
175 0,
176 0,
177 0,
178 /* dma */
179 ATA_CMD_READ,
180 ATA_CMD_WRITE,
181 ATA_CMD_READ_EXT,
182 ATA_CMD_WRITE_EXT,
183 0,
184 0,
185 0,
186 ATA_CMD_WRITE_FUA_EXT
187 };
188
189 /**
190 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
191 * @qc: command to examine and configure
192 *
193 * Examine the device configuration and tf->flags to calculate
194 * the proper read/write commands and protocol to use.
195 *
196 * LOCKING:
197 * caller.
198 */
199 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
200 {
201 struct ata_taskfile *tf = &qc->tf;
202 struct ata_device *dev = qc->dev;
203 u8 cmd;
204
205 int index, fua, lba48, write;
206
207 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
208 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
209 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
210
211 if (dev->flags & ATA_DFLAG_PIO) {
212 tf->protocol = ATA_PROT_PIO;
213 index = dev->multi_count ? 0 : 8;
214 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
215 /* Unable to use DMA due to host limitation */
216 tf->protocol = ATA_PROT_PIO;
217 index = dev->multi_count ? 0 : 8;
218 } else {
219 tf->protocol = ATA_PROT_DMA;
220 index = 16;
221 }
222
223 cmd = ata_rw_cmds[index + fua + lba48 + write];
224 if (cmd) {
225 tf->command = cmd;
226 return 0;
227 }
228 return -1;
229 }
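
/*
 * Worked example (illustrative): for an LBA48 FUA DMA write the
 * lookup is ata_rw_cmds[16 + 4 + 2 + 1], i.e. ATA_CMD_WRITE_FUA_EXT;
 * a plain LBA28 PIO read on a device without multi-count support is
 * ata_rw_cmds[8 + 0 + 0 + 0], i.e. ATA_CMD_PIO_READ.  The zero slots
 * in the table are invalid combinations and make the function
 * return -1.
 */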
230
231 /**
232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
233 * @pio_mask: pio_mask
234 * @mwdma_mask: mwdma_mask
235 * @udma_mask: udma_mask
236 *
237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
238 * unsigned int xfer_mask.
239 *
240 * LOCKING:
241 * None.
242 *
243 * RETURNS:
244 * Packed xfer_mask.
245 */
246 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
249 {
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
253 }
254
255 /**
256 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
257 * @xfer_mask: xfer_mask to unpack
258 * @pio_mask: resulting pio_mask
259 * @mwdma_mask: resulting mwdma_mask
260 * @udma_mask: resulting udma_mask
261 *
262 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
263 * Any NULL destination masks will be ignored.
264 */
265 static void ata_unpack_xfermask(unsigned int xfer_mask,
266 unsigned int *pio_mask,
267 unsigned int *mwdma_mask,
268 unsigned int *udma_mask)
269 {
270 if (pio_mask)
271 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
272 if (mwdma_mask)
273 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
274 if (udma_mask)
275 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
276 }
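
/*
 * Example (illustrative): a device supporting PIO0-4, MWDMA0-2 and
 * UDMA0-5 packs as
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * and ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma) recovers
 * the three masks; callers may pass NULL for outputs they don't need.
 */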
277
278 static const struct ata_xfer_ent {
279 unsigned int shift, bits;
280 u8 base;
281 } ata_xfer_tbl[] = {
282 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
283 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
284 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
285 { -1, },
286 };
287
288 /**
289 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
290 * @xfer_mask: xfer_mask of interest
291 *
292 * Return matching XFER_* value for @xfer_mask. Only the highest
293 * bit of @xfer_mask is considered.
294 *
295 * LOCKING:
296 * None.
297 *
298 * RETURNS:
299 * Matching XFER_* value, 0 if no match found.
300 */
301 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
302 {
303 int highbit = fls(xfer_mask) - 1;
304 const struct ata_xfer_ent *ent;
305
306 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
307 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
308 return ent->base + highbit - ent->shift;
309 return 0;
310 }
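
/*
 * Example (illustrative): if the highest bit set in @xfer_mask is the
 * UDMA5 bit, it falls in the UDMA entry of ata_xfer_tbl and the
 * function returns XFER_UDMA_0 + 5 == XFER_UDMA_5; an empty mask
 * yields fls() == 0 and the function returns 0.
 */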
311
312 /**
313 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
314 * @xfer_mode: XFER_* of interest
315 *
316 * Return matching xfer_mask for @xfer_mode.
317 *
318 * LOCKING:
319 * None.
320 *
321 * RETURNS:
322 * Matching xfer_mask, 0 if no match found.
323 */
324 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
325 {
326 const struct ata_xfer_ent *ent;
327
328 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
329 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
330 return 1 << (ent->shift + xfer_mode - ent->base);
331 return 0;
332 }
333
334 /**
335 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
336 * @xfer_mode: XFER_* of interest
337 *
338 * Return matching xfer_shift for @xfer_mode.
339 *
340 * LOCKING:
341 * None.
342 *
343 * RETURNS:
344 * Matching xfer_shift, -1 if no match found.
345 */
346 static int ata_xfer_mode2shift(unsigned int xfer_mode)
347 {
348 const struct ata_xfer_ent *ent;
349
350 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
351 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
352 return ent->shift;
353 return -1;
354 }
355
356 /**
357 * ata_mode_string - convert xfer_mask to string
358 * @xfer_mask: mask of bits supported; only highest bit counts.
359 *
360 * Determine string which represents the highest speed
361 * (highest bit in @xfer_mask).
362 *
363 * LOCKING:
364 * None.
365 *
366 * RETURNS:
367 * Constant C string representing highest speed listed in
368 * @xfer_mask, or the constant C string "<n/a>".
369 */
370 static const char *ata_mode_string(unsigned int xfer_mask)
371 {
372 static const char * const xfer_mode_str[] = {
373 "PIO0",
374 "PIO1",
375 "PIO2",
376 "PIO3",
377 "PIO4",
378 "MWDMA0",
379 "MWDMA1",
380 "MWDMA2",
381 "UDMA/16",
382 "UDMA/25",
383 "UDMA/33",
384 "UDMA/44",
385 "UDMA/66",
386 "UDMA/100",
387 "UDMA/133",
388 "UDMA7",
389 };
390 int highbit;
391
392 highbit = fls(xfer_mask) - 1;
393 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
394 return xfer_mode_str[highbit];
395 return "<n/a>";
396 }
397
398 static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
399 {
400 if (ata_dev_present(dev)) {
401 printk(KERN_WARNING "ata%u: dev %u disabled\n",
402 ap->id, dev->devno);
403 dev->class++;
404 }
405 }
406
407 /**
408 * ata_pio_devchk - PATA device presence detection
409 * @ap: ATA channel to examine
410 * @device: Device to examine (starting at zero)
411 *
412 * This technique was originally described in
413 * Hale Landis's ATADRVR (www.ata-atapi.com), and
414 * later found its way into the ATA/ATAPI spec.
415 *
416 * Write a pattern to the ATA shadow registers,
417 * and if a device is present, it will respond by
418 * correctly storing and echoing back the
419 * ATA shadow register contents.
420 *
421 * LOCKING:
422 * caller.
423 */
424
425 static unsigned int ata_pio_devchk(struct ata_port *ap,
426 unsigned int device)
427 {
428 struct ata_ioports *ioaddr = &ap->ioaddr;
429 u8 nsect, lbal;
430
431 ap->ops->dev_select(ap, device);
432
433 outb(0x55, ioaddr->nsect_addr);
434 outb(0xaa, ioaddr->lbal_addr);
435
436 outb(0xaa, ioaddr->nsect_addr);
437 outb(0x55, ioaddr->lbal_addr);
438
439 outb(0x55, ioaddr->nsect_addr);
440 outb(0xaa, ioaddr->lbal_addr);
441
442 nsect = inb(ioaddr->nsect_addr);
443 lbal = inb(ioaddr->lbal_addr);
444
445 if ((nsect == 0x55) && (lbal == 0xaa))
446 return 1; /* we found a device */
447
448 return 0; /* nothing found */
449 }
450
451 /**
452 * ata_mmio_devchk - PATA device presence detection
453 * @ap: ATA channel to examine
454 * @device: Device to examine (starting at zero)
455 *
456 * This technique was originally described in
457 * Hale Landis's ATADRVR (www.ata-atapi.com), and
458 * later found its way into the ATA/ATAPI spec.
459 *
460 * Write a pattern to the ATA shadow registers,
461 * and if a device is present, it will respond by
462 * correctly storing and echoing back the
463 * ATA shadow register contents.
464 *
465 * LOCKING:
466 * caller.
467 */
468
469 static unsigned int ata_mmio_devchk(struct ata_port *ap,
470 unsigned int device)
471 {
472 struct ata_ioports *ioaddr = &ap->ioaddr;
473 u8 nsect, lbal;
474
475 ap->ops->dev_select(ap, device);
476
477 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
478 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
479
480 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
481 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
482
483 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
484 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
485
486 nsect = readb((void __iomem *) ioaddr->nsect_addr);
487 lbal = readb((void __iomem *) ioaddr->lbal_addr);
488
489 if ((nsect == 0x55) && (lbal == 0xaa))
490 return 1; /* we found a device */
491
492 return 0; /* nothing found */
493 }
494
495 /**
496 * ata_devchk - PATA device presence detection
497 * @ap: ATA channel to examine
498 * @device: Device to examine (starting at zero)
499 *
500 * Dispatch ATA device presence detection, depending
501 * on whether we are using PIO or MMIO to talk to the
502 * ATA shadow registers.
503 *
504 * LOCKING:
505 * caller.
506 */
507
508 static unsigned int ata_devchk(struct ata_port *ap,
509 unsigned int device)
510 {
511 if (ap->flags & ATA_FLAG_MMIO)
512 return ata_mmio_devchk(ap, device);
513 return ata_pio_devchk(ap, device);
514 }
515
516 /**
517 * ata_dev_classify - determine device type based on ATA-spec signature
518 * @tf: ATA taskfile register set for device to be identified
519 *
520 * Determine from taskfile register contents whether a device is
521 * ATA or ATAPI, as per "Signature and persistence" section
522 * of ATA/PI spec (volume 1, sect 5.14).
523 *
524 * LOCKING:
525 * None.
526 *
527 * RETURNS:
528 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
529 * in the event of failure.
530 */
531
532 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
533 {
534 /* Apple's open source Darwin code hints that some devices only
535 * put a proper signature into the LBA mid/high registers,
536 * so we check only those; it's sufficient for uniqueness.
537 */
538
539 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
540 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
541 DPRINTK("found ATA device by sig\n");
542 return ATA_DEV_ATA;
543 }
544
545 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
546 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
547 DPRINTK("found ATAPI device by sig\n");
548 return ATA_DEV_ATAPI;
549 }
550
551 DPRINTK("unknown device\n");
552 return ATA_DEV_UNKNOWN;
553 }
554
555 /**
556 * ata_dev_try_classify - Parse returned ATA device signature
557 * @ap: ATA channel to examine
558 * @device: Device to examine (starting at zero)
559 * @r_err: Value of error register on completion
560 *
561 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
562 * an ATA/ATAPI-defined set of values is placed in the ATA
563 * shadow registers, indicating the results of device detection
564 * and diagnostics.
565 *
566 * Select the ATA device, and read the values from the ATA shadow
567 * registers. Then parse according to the Error register value,
568 * and the spec-defined values examined by ata_dev_classify().
569 *
570 * LOCKING:
571 * caller.
572 *
573 * RETURNS:
574 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
575 */
576
577 static unsigned int
578 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
579 {
580 struct ata_taskfile tf;
581 unsigned int class;
582 u8 err;
583
584 ap->ops->dev_select(ap, device);
585
586 memset(&tf, 0, sizeof(tf));
587
588 ap->ops->tf_read(ap, &tf);
589 err = tf.feature;
590 if (r_err)
591 *r_err = err;
592
593 /* see if device passed diags */
594 if (err == 1)
595 /* do nothing */ ;
596 else if ((device == 0) && (err == 0x81))
597 /* do nothing */ ;
598 else
599 return ATA_DEV_NONE;
600
601 /* determine if device is ATA or ATAPI */
602 class = ata_dev_classify(&tf);
603
604 if (class == ATA_DEV_UNKNOWN)
605 return ATA_DEV_NONE;
606 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
607 return ATA_DEV_NONE;
608 return class;
609 }
610
611 /**
612 * ata_id_string - Convert IDENTIFY DEVICE page into string
613 * @id: IDENTIFY DEVICE results we will examine
614 * @s: string into which data is output
615 * @ofs: offset into identify device page
616 * @len: length of string to return. Must be an even number.
617 *
618 * The strings in the IDENTIFY DEVICE page are broken up into
619 * 16-bit chunks. Run through the string, and output each
620 * 8-bit chunk linearly, regardless of platform.
621 *
622 * LOCKING:
623 * caller.
624 */
625
626 void ata_id_string(const u16 *id, unsigned char *s,
627 unsigned int ofs, unsigned int len)
628 {
629 unsigned int c;
630
631 while (len > 0) {
632 c = id[ofs] >> 8;
633 *s = c;
634 s++;
635
636 c = id[ofs] & 0xff;
637 *s = c;
638 s++;
639
640 ofs++;
641 len -= 2;
642 }
643 }
644
645 /**
646 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
647 * @id: IDENTIFY DEVICE results we will examine
648 * @s: string into which data is output
649 * @ofs: offset into identify device page
650 * @len: length of string to return. Must be an odd number.
651 *
652 * This function is identical to ata_id_string except that it
653 * trims trailing spaces and terminates the resulting string with
654 * null. @len must be actual maximum length (even number) + 1.
655 *
656 * LOCKING:
657 * caller.
658 */
659 void ata_id_c_string(const u16 *id, unsigned char *s,
660 unsigned int ofs, unsigned int len)
661 {
662 unsigned char *p;
663
664 WARN_ON(!(len & 1));
665
666 ata_id_string(id, s, ofs, len - 1);
667
668 p = s + strnlen(s, len - 1);
669 while (p > s && p[-1] == ' ')
670 p--;
671 *p = '\0';
672 }
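
/*
 * Illustrative use (buffer size is an assumption based on the 40-char
 * model field at words 27-46 of the IDENTIFY page):
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD_OFS, sizeof(model));
 *
 * sizeof(model) is odd (40 + 1 for the terminator), as the WARN_ON()
 * above requires.
 */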
673
674 static u64 ata_id_n_sectors(const u16 *id)
675 {
676 if (ata_id_has_lba(id)) {
677 if (ata_id_has_lba48(id))
678 return ata_id_u64(id, 100);
679 else
680 return ata_id_u32(id, 60);
681 } else {
682 if (ata_id_current_chs_valid(id))
683 return ata_id_u32(id, 57);
684 else
685 return id[1] * id[3] * id[6];
686 }
687 }
688
689 /**
690 * ata_noop_dev_select - Select device 0/1 on ATA bus
691 * @ap: ATA channel to manipulate
692 * @device: ATA device (numbered from zero) to select
693 *
694 * This function performs no action; it exists purely as a no-op.
695 *
696 * May be used as the dev_select() entry in ata_port_operations.
697 *
698 * LOCKING:
699 * caller.
700 */
701 void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
702 {
703 }
704
705
706 /**
707 * ata_std_dev_select - Select device 0/1 on ATA bus
708 * @ap: ATA channel to manipulate
709 * @device: ATA device (numbered from zero) to select
710 *
711 * Use the method defined in the ATA specification to
712 * make either device 0, or device 1, active on the
713 * ATA channel. Works with both PIO and MMIO.
714 *
715 * May be used as the dev_select() entry in ata_port_operations.
716 *
717 * LOCKING:
718 * caller.
719 */
720
721 void ata_std_dev_select(struct ata_port *ap, unsigned int device)
722 {
723 u8 tmp;
724
725 if (device == 0)
726 tmp = ATA_DEVICE_OBS;
727 else
728 tmp = ATA_DEVICE_OBS | ATA_DEV1;
729
730 if (ap->flags & ATA_FLAG_MMIO) {
731 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
732 } else {
733 outb(tmp, ap->ioaddr.device_addr);
734 }
735 ata_pause(ap); /* needed; also flushes, for mmio */
736 }
737
738 /**
739 * ata_dev_select - Select device 0/1 on ATA bus
740 * @ap: ATA channel to manipulate
741 * @device: ATA device (numbered from zero) to select
742 * @wait: non-zero to wait for Status register BSY bit to clear
743 * @can_sleep: non-zero if context allows sleeping
744 *
745 * Use the method defined in the ATA specification to
746 * make either device 0, or device 1, active on the
747 * ATA channel.
748 *
749 * This is a high-level version of ata_std_dev_select(),
750 * which additionally provides the services of inserting
751 * the proper pauses and status polling, where needed.
752 *
753 * LOCKING:
754 * caller.
755 */
756
757 void ata_dev_select(struct ata_port *ap, unsigned int device,
758 unsigned int wait, unsigned int can_sleep)
759 {
760 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
761 ap->id, device, wait);
762
763 if (wait)
764 ata_wait_idle(ap);
765
766 ap->ops->dev_select(ap, device);
767
768 if (wait) {
769 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
770 msleep(150);
771 ata_wait_idle(ap);
772 }
773 }
774
775 /**
776 * ata_dump_id - IDENTIFY DEVICE info debugging output
777 * @id: IDENTIFY DEVICE page to dump
778 *
779 * Dump selected 16-bit words from the given IDENTIFY DEVICE
780 * page.
781 *
782 * LOCKING:
783 * caller.
784 */
785
786 static inline void ata_dump_id(const u16 *id)
787 {
788 DPRINTK("49==0x%04x "
789 "53==0x%04x "
790 "63==0x%04x "
791 "64==0x%04x "
792 "75==0x%04x\n",
793 id[49],
794 id[53],
795 id[63],
796 id[64],
797 id[75]);
798 DPRINTK("80==0x%04x "
799 "81==0x%04x "
800 "82==0x%04x "
801 "83==0x%04x "
802 "84==0x%04x\n",
803 id[80],
804 id[81],
805 id[82],
806 id[83],
807 id[84]);
808 DPRINTK("88==0x%04x "
809 "93==0x%04x\n",
810 id[88],
811 id[93]);
812 }
813
814 /**
815 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
816 * @id: IDENTIFY data to compute xfer mask from
817 *
818 * Compute the xfermask for this device. This is not as trivial
819 * as it seems if we must consider early devices correctly.
820 *
821 * FIXME: pre IDE drive timing (do we care ?).
822 *
823 * LOCKING:
824 * None.
825 *
826 * RETURNS:
827 * Computed xfermask
828 */
829 static unsigned int ata_id_xfermask(const u16 *id)
830 {
831 unsigned int pio_mask, mwdma_mask, udma_mask;
832
833 /* Usual case. Word 53 indicates word 64 is valid */
834 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
835 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
836 pio_mask <<= 3;
837 pio_mask |= 0x7;
838 } else {
839 /* If word 64 isn't valid then Word 51 high byte holds
840 * the PIO timing number for the maximum. Turn it into
841 * a mask.
842 */
843 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
844
845 /* But wait.. there's more. Design your standards by
846 * committee and you too can get a free iordy field to
847 * process. However it's the speeds, not the modes, that
848 * are supported... Note that drivers using the timing API
849 * will get this right anyway
850 */
851 }
852
853 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
854
855 udma_mask = 0;
856 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
857 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
858
859 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
860 }
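
/*
 * Worked example (illustrative): a drive reporting PIO3 and PIO4 in
 * word 64 has (id[ATA_ID_PIO_MODES] & 0x03) == 0x03; shifting left
 * by 3 and OR-ing in 0x7 (PIO0-2, always supported) gives a pio_mask
 * of 0x1f, i.e. PIO0 through PIO4.
 */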
861
862 /**
863 * ata_port_queue_task - Queue port_task
864 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value to pass to workqueue function
 * @delay: delay time for workqueue function
865 *
866 * Schedule @fn(@data) for execution after @delay jiffies using
867 * port_task. There is one port_task per port and it's the
868 * user's (the low-level driver's) responsibility to make sure that only
869 * one task is active at any given time.
870 *
871 * libata core layer takes care of synchronization between
872 * port_task and EH. ata_port_queue_task() may be ignored for EH
873 * synchronization.
874 *
875 * LOCKING:
876 * Inherited from caller.
877 */
878 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
879 unsigned long delay)
880 {
881 int rc;
882
883 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
884 return;
885
886 PREPARE_WORK(&ap->port_task, fn, data);
887
888 if (!delay)
889 rc = queue_work(ata_wq, &ap->port_task);
890 else
891 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
892
893 /* rc == 0 means that another user is using port task */
894 WARN_ON(rc == 0);
895 }
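
/*
 * Illustrative sketch: a polling routine can reschedule itself this
 * way, much as the PIO state machine elsewhere in this file does;
 * foo_poll_task is a hypothetical LLDD function:
 *
 *	ata_port_queue_task(ap, foo_poll_task, ap, msecs_to_jiffies(10));
 */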
896
897 /**
898 * ata_port_flush_task - Flush port_task
899 * @ap: The ata_port to flush port_task for
900 *
901 * After this function completes, port_task is guaranteed not to
902 * be running or scheduled.
903 *
904 * LOCKING:
905 * Kernel thread context (may sleep)
906 */
907 void ata_port_flush_task(struct ata_port *ap)
908 {
909 unsigned long flags;
910
911 DPRINTK("ENTER\n");
912
913 spin_lock_irqsave(&ap->host_set->lock, flags);
914 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
915 spin_unlock_irqrestore(&ap->host_set->lock, flags);
916
917 DPRINTK("flush #1\n");
918 flush_workqueue(ata_wq);
919
920 /*
921 * At this point, if a task is running, it's guaranteed to see
922 * the FLUSH flag; thus, it will never queue pio tasks again.
923 * Cancel and flush.
924 */
925 if (!cancel_delayed_work(&ap->port_task)) {
926 DPRINTK("flush #2\n");
927 flush_workqueue(ata_wq);
928 }
929
930 spin_lock_irqsave(&ap->host_set->lock, flags);
931 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
932 spin_unlock_irqrestore(&ap->host_set->lock, flags);
933
934 DPRINTK("EXIT\n");
935 }
936
937 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
938 {
939 struct completion *waiting = qc->private_data;
940
941 qc->ap->ops->tf_read(qc->ap, &qc->tf);
942 complete(waiting);
943 }
944
945 /**
946 * ata_exec_internal - execute libata internal command
947 * @ap: Port to which the command is sent
948 * @dev: Device to which the command is sent
949 * @tf: Taskfile registers for the command and the result
950 * @dma_dir: Data transfer direction of the command
951 * @buf: Data buffer of the command
952 * @buflen: Length of data buffer
953 *
954 * Executes libata internal command with timeout. @tf contains
955 * command on entry and result on return. Timeout and error
956 * conditions are reported via return value. No recovery action
957 * is taken after a command times out. It is the caller's duty to
958 * clean up after timeout.
959 *
960 * LOCKING:
961 * None. Should be called with kernel context, might sleep.
962 */
963
964 static unsigned
965 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
966 struct ata_taskfile *tf,
967 int dma_dir, void *buf, unsigned int buflen)
968 {
969 u8 command = tf->command;
970 struct ata_queued_cmd *qc;
971 DECLARE_COMPLETION(wait);
972 unsigned long flags;
973 unsigned int err_mask;
974
975 spin_lock_irqsave(&ap->host_set->lock, flags);
976
977 qc = ata_qc_new_init(ap, dev);
978 BUG_ON(qc == NULL);
979
980 qc->tf = *tf;
981 qc->dma_dir = dma_dir;
982 if (dma_dir != DMA_NONE) {
983 ata_sg_init_one(qc, buf, buflen);
984 qc->nsect = buflen / ATA_SECT_SIZE;
985 }
986
987 qc->private_data = &wait;
988 qc->complete_fn = ata_qc_complete_internal;
989
990 qc->err_mask = ata_qc_issue(qc);
991 if (qc->err_mask)
992 ata_qc_complete(qc);
993
994 spin_unlock_irqrestore(&ap->host_set->lock, flags);
995
996 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
997 ata_port_flush_task(ap);
998
999 spin_lock_irqsave(&ap->host_set->lock, flags);
1000
1001 /* We're racing with irq here. If we lose, the
1002 * following test prevents us from completing the qc
1003 * again. If completion irq occurs after here but
1004 * before the caller cleans up, it will result in a
1005 * spurious interrupt. We can live with that.
1006 */
1007 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1008 qc->err_mask = AC_ERR_TIMEOUT;
1009 ata_qc_complete(qc);
1010 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1011 ap->id, command);
1012 }
1013
1014 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1015 }
1016
1017 *tf = qc->tf;
1018 err_mask = qc->err_mask;
1019
1020 ata_qc_free(qc);
1021
1022 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1023 * Until those drivers are fixed, we detect the condition
1024 * here, fail the command with AC_ERR_SYSTEM and reenable the
1025 * port.
1026 *
1027 * Note that this doesn't change any behavior as internal
1028 * command failure results in disabling the device in the
1029 * higher layer for LLDDs without new reset/EH callbacks.
1030 *
1031 * Kill the following code as soon as those drivers are fixed.
1032 */
1033 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1034 err_mask |= AC_ERR_SYSTEM;
1035 ata_port_probe(ap);
1036 }
1037
1038 return err_mask;
1039 }
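
/*
 * Illustrative sketch of a non-data command issued through
 * ata_exec_internal(), modeled on the SET FEATURES - XFER MODE
 * helper later in this file:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(ap, &tf, dev->devno);
 *	tf.command = ATA_CMD_SET_FEATURES;
 *	tf.feature = SETFEATURES_XFER;
 *	tf.nsect = dev->xfer_mode;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *
 *	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
 */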
1040
1041 /**
1042 * ata_pio_need_iordy - check if iordy needed
1043 * @adev: ATA device
1044 *
1045 * Check if the current speed of the device requires IORDY. Used
1046 * by various controllers for chip configuration.
1047 */
1048
1049 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1050 {
1051 int pio;
1052 int speed = adev->pio_mode - XFER_PIO_0;
1053
1054 if (speed < 2)
1055 return 0;
1056 if (speed > 2)
1057 return 1;
1058
1059 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1060
1061 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1062 pio = adev->id[ATA_ID_EIDE_PIO];
1063 /* Is the speed faster than the drive allows non IORDY ? */
1064 if (pio) {
1065 /* This is cycle times not frequency - watch the logic! */
1066 if (pio > 240) /* PIO2 is 240nS per cycle */
1067 return 1;
1068 return 0;
1069 }
1070 }
1071 return 0;
1072 }
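
/*
 * Example (illustrative): PIO0/1 never need IORDY and PIO3/4 always
 * do; for the PIO2 boundary case, a drive advertising a minimum
 * non-IORDY cycle time above 240ns (the PIO2 cycle time) in
 * id[ATA_ID_EIDE_PIO] is reported as needing IORDY.
 */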
1073
1074 /**
1075 * ata_dev_read_id - Read ID data from the specified device
1076 * @ap: port on which target device resides
1077 * @dev: target device
1078 * @p_class: pointer to class of the target device (may be changed)
1079 * @post_reset: is this read ID post-reset?
1080 * @p_id: read IDENTIFY page (newly allocated)
1081 *
1082 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1083 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1084 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1085 * for pre-ATA4 drives.
1086 *
1087 * LOCKING:
1088 * Kernel thread context (may sleep)
1089 *
1090 * RETURNS:
1091 * 0 on success, -errno otherwise.
1092 */
1093 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1094 unsigned int *p_class, int post_reset, u16 **p_id)
1095 {
1096 unsigned int class = *p_class;
1097 struct ata_taskfile tf;
1098 unsigned int err_mask = 0;
1099 u16 *id;
1100 const char *reason;
1101 int rc;
1102
1103 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1104
1105 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1106
1107 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1108 if (id == NULL) {
1109 rc = -ENOMEM;
1110 reason = "out of memory";
1111 goto err_out;
1112 }
1113
1114 retry:
1115 ata_tf_init(ap, &tf, dev->devno);
1116
1117 switch (class) {
1118 case ATA_DEV_ATA:
1119 tf.command = ATA_CMD_ID_ATA;
1120 break;
1121 case ATA_DEV_ATAPI:
1122 tf.command = ATA_CMD_ID_ATAPI;
1123 break;
1124 default:
1125 rc = -ENODEV;
1126 reason = "unsupported class";
1127 goto err_out;
1128 }
1129
1130 tf.protocol = ATA_PROT_PIO;
1131
1132 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1133 id, sizeof(id[0]) * ATA_ID_WORDS);
1134 if (err_mask) {
1135 rc = -EIO;
1136 reason = "I/O error";
1137 goto err_out;
1138 }
1139
1140 swap_buf_le16(id, ATA_ID_WORDS);
1141
1142 /* sanity check */
1143 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1144 rc = -EINVAL;
1145 reason = "device reports illegal type";
1146 goto err_out;
1147 }
1148
1149 if (post_reset && class == ATA_DEV_ATA) {
1150 /*
1151 * The exact sequence expected by certain pre-ATA4 drives is:
1152 * SRST RESET
1153 * IDENTIFY
1154 * INITIALIZE DEVICE PARAMETERS
1155 * anything else..
1156 * Some drives were very specific about that exact sequence.
1157 */
1158 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1159 err_mask = ata_dev_init_params(ap, dev);
1160 if (err_mask) {
1161 rc = -EIO;
1162 reason = "INIT_DEV_PARAMS failed";
1163 goto err_out;
1164 }
1165
1166 /* current CHS translation info (id[53-58]) might be
1167 * changed. reread the identify device info.
1168 */
1169 post_reset = 0;
1170 goto retry;
1171 }
1172 }
1173
1174 *p_class = class;
1175 *p_id = id;
1176 return 0;
1177
1178 err_out:
1179 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1180 ap->id, dev->devno, reason);
1181 kfree(id);
1182 return rc;
1183 }
1184
1185 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1186 struct ata_device *dev)
1187 {
1188 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1189 }
1190
1191 /**
1192 * ata_dev_configure - Configure the specified ATA/ATAPI device
1193 * @ap: Port on which target device resides
1194 * @dev: Target device to configure
1195 * @print_info: Enable device info printout
1196 *
1197 * Configure @dev according to @dev->id. Generic and low-level
1198 * driver specific fixups are also applied.
1199 *
1200 * LOCKING:
1201 * Kernel thread context (may sleep)
1202 *
1203 * RETURNS:
1204 * 0 on success, -errno otherwise
1205 */
1206 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1207 int print_info)
1208 {
1209 const u16 *id = dev->id;
1210 unsigned int xfer_mask;
1211 int i, rc;
1212
1213 if (!ata_dev_present(dev)) {
1214 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1215 ap->id, dev->devno);
1216 return 0;
1217 }
1218
1219 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1220
1221 /* print device capabilities */
1222 if (print_info)
1223 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1224 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1225 ap->id, dev->devno, id[49], id[82], id[83],
1226 id[84], id[85], id[86], id[87], id[88]);
1227
1228 /* initialize to-be-configured parameters */
1229 dev->flags = 0;
1230 dev->max_sectors = 0;
1231 dev->cdb_len = 0;
1232 dev->n_sectors = 0;
1233 dev->cylinders = 0;
1234 dev->heads = 0;
1235 dev->sectors = 0;
1236
1237 /*
1238 * common ATA, ATAPI feature tests
1239 */
1240
1241 /* find max transfer mode; for printk only */
1242 xfer_mask = ata_id_xfermask(id);
1243
1244 ata_dump_id(id);
1245
1246 /* ATA-specific feature tests */
1247 if (dev->class == ATA_DEV_ATA) {
1248 dev->n_sectors = ata_id_n_sectors(id);
1249
1250 if (ata_id_has_lba(id)) {
1251 const char *lba_desc;
1252
1253 lba_desc = "LBA";
1254 dev->flags |= ATA_DFLAG_LBA;
1255 if (ata_id_has_lba48(id)) {
1256 dev->flags |= ATA_DFLAG_LBA48;
1257 lba_desc = "LBA48";
1258 }
1259
1260 /* print device info to dmesg */
1261 if (print_info)
1262 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1263 "max %s, %Lu sectors: %s\n",
1264 ap->id, dev->devno,
1265 ata_id_major_version(id),
1266 ata_mode_string(xfer_mask),
1267 (unsigned long long)dev->n_sectors,
1268 lba_desc);
1269 } else {
1270 /* CHS */
1271
1272 /* Default translation */
1273 dev->cylinders = id[1];
1274 dev->heads = id[3];
1275 dev->sectors = id[6];
1276
1277 if (ata_id_current_chs_valid(id)) {
1278 /* Current CHS translation is valid. */
1279 dev->cylinders = id[54];
1280 dev->heads = id[55];
1281 dev->sectors = id[56];
1282 }
1283
1284 /* print device info to dmesg */
1285 if (print_info)
1286 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1287 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1288 ap->id, dev->devno,
1289 ata_id_major_version(id),
1290 ata_mode_string(xfer_mask),
1291 (unsigned long long)dev->n_sectors,
1292 dev->cylinders, dev->heads, dev->sectors);
1293 }
1294
1295 dev->cdb_len = 16;
1296 }
1297
1298 /* ATAPI-specific feature tests */
1299 else if (dev->class == ATA_DEV_ATAPI) {
1300 rc = atapi_cdb_len(id);
1301 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1302 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1303 rc = -EINVAL;
1304 goto err_out_nosup;
1305 }
1306 dev->cdb_len = (unsigned int) rc;
1307
1308 /* print device info to dmesg */
1309 if (print_info)
1310 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1311 ap->id, dev->devno, ata_mode_string(xfer_mask));
1312 }
1313
1314 ap->host->max_cmd_len = 0;
1315 for (i = 0; i < ATA_MAX_DEVICES; i++)
1316 ap->host->max_cmd_len = max_t(unsigned int,
1317 ap->host->max_cmd_len,
1318 ap->device[i].cdb_len);
1319
1320 /* limit bridge transfers to udma5, 200 sectors */
1321 if (ata_dev_knobble(ap, dev)) {
1322 if (print_info)
1323 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1324 ap->id, dev->devno);
1325 dev->udma_mask &= ATA_UDMA5;
1326 dev->max_sectors = ATA_MAX_SECTORS;
1327 }
1328
1329 if (ap->ops->dev_config)
1330 ap->ops->dev_config(ap, dev);
1331
1332 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1333 return 0;
1334
1335 err_out_nosup:
1336 DPRINTK("EXIT, err\n");
1337 return rc;
1338 }
1339
1340 /**
1341 * ata_bus_probe - Reset and probe ATA bus
1342 * @ap: Bus to probe
1343 *
1344 * Master ATA bus probing function. Initiates a hardware-dependent
1345 * bus reset, then attempts to identify any devices found on
1346 * the bus.
1347 *
1348 * LOCKING:
1349 * PCI/etc. bus probe sem.
1350 *
1351 * RETURNS:
1352 * Zero on success, non-zero on error.
1353 */
1354
1355 static int ata_bus_probe(struct ata_port *ap)
1356 {
1357 unsigned int classes[ATA_MAX_DEVICES];
1358 unsigned int i, rc, found = 0;
1359
1360 ata_port_probe(ap);
1361
1362 /* reset and determine device classes */
1363 for (i = 0; i < ATA_MAX_DEVICES; i++)
1364 classes[i] = ATA_DEV_UNKNOWN;
1365
1366 if (ap->ops->probe_reset) {
1367 rc = ap->ops->probe_reset(ap, classes);
1368 if (rc) {
1369 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1370 return rc;
1371 }
1372 } else {
1373 ap->ops->phy_reset(ap);
1374
1375 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1376 for (i = 0; i < ATA_MAX_DEVICES; i++)
1377 classes[i] = ap->device[i].class;
1378
1379 ata_port_probe(ap);
1380 }
1381
1382 for (i = 0; i < ATA_MAX_DEVICES; i++)
1383 if (classes[i] == ATA_DEV_UNKNOWN)
1384 classes[i] = ATA_DEV_NONE;
1385
1386 /* read IDENTIFY page and configure devices */
1387 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1388 struct ata_device *dev = &ap->device[i];
1389
1390 dev->class = classes[i];
1391
1392 if (!ata_dev_present(dev))
1393 continue;
1394
1395 WARN_ON(dev->id != NULL);
1396 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1397 dev->class = ATA_DEV_NONE;
1398 continue;
1399 }
1400
1401 if (ata_dev_configure(ap, dev, 1)) {
1402 ata_dev_disable(ap, dev);
1403 continue;
1404 }
1405
1406 found = 1;
1407 }
1408
1409 if (!found)
1410 goto err_out_disable;
1411
1412 ata_set_mode(ap);
1413 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1414 goto err_out_disable;
1415
1416 return 0;
1417
1418 err_out_disable:
1419 ap->ops->port_disable(ap);
1420 return -1;
1421 }
1422
1423 /**
1424 * ata_port_probe - Mark port as enabled
1425 * @ap: Port for which we indicate enablement
1426 *
1427 * Modify @ap data structure such that the system
1428 * thinks that the entire port is enabled.
1429 *
1430 * LOCKING: host_set lock, or some other form of
1431 * serialization.
1432 */
1433
1434 void ata_port_probe(struct ata_port *ap)
1435 {
1436 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1437 }
1438
1439 /**
1440 * sata_print_link_status - Print SATA link status
1441 * @ap: SATA port to printk link status about
1442 *
1443 * This function prints link speed and status of a SATA link.
1444 *
1445 * LOCKING:
1446 * None.
1447 */
1448 static void sata_print_link_status(struct ata_port *ap)
1449 {
1450 u32 sstatus, tmp;
1451 const char *speed;
1452
1453 if (!ap->ops->scr_read)
1454 return;
1455
1456 sstatus = scr_read(ap, SCR_STATUS);
1457
1458 if (sata_dev_present(ap)) {
1459 tmp = (sstatus >> 4) & 0xf;
1460 if (tmp & (1 << 0))
1461 speed = "1.5";
1462 else if (tmp & (1 << 1))
1463 speed = "3.0";
1464 else
1465 speed = "<unknown>";
1466 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1467 ap->id, speed, sstatus);
1468 } else {
1469 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1470 ap->id, sstatus);
1471 }
1472 }
1473
1474 /**
1475 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1476 * @ap: SATA port associated with target SATA PHY.
1477 *
1478 * This function issues commands to standard SATA Sxxx
1479 * PHY registers, to wake up the phy (and device), and
1480 * clear any reset condition.
1481 *
1482 * LOCKING:
1483 * PCI/etc. bus probe sem.
1484 *
1485 */
1486 void __sata_phy_reset(struct ata_port *ap)
1487 {
1488 u32 sstatus;
1489 unsigned long timeout = jiffies + (HZ * 5);
1490
1491 if (ap->flags & ATA_FLAG_SATA_RESET) {
1492 /* issue phy wake/reset */
1493 scr_write_flush(ap, SCR_CONTROL, 0x301);
1494 /* Couldn't find anything in SATA I/II specs, but
1495 * AHCI-1.1 10.4.2 says at least 1 ms. */
1496 mdelay(1);
1497 }
1498 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1499
1500 /* wait for phy to become ready, if necessary */
1501 do {
1502 msleep(200);
1503 sstatus = scr_read(ap, SCR_STATUS);
1504 if ((sstatus & 0xf) != 1)
1505 break;
1506 } while (time_before(jiffies, timeout));
1507
1508 /* print link status */
1509 sata_print_link_status(ap);
1510
1511 /* TODO: phy layer with polling, timeouts, etc. */
1512 if (sata_dev_present(ap))
1513 ata_port_probe(ap);
1514 else
1515 ata_port_disable(ap);
1516
1517 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1518 return;
1519
1520 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1521 ata_port_disable(ap);
1522 return;
1523 }
1524
1525 ap->cbl = ATA_CBL_SATA;
1526 }
1527
1528 /**
1529 * sata_phy_reset - Reset SATA bus.
1530 * @ap: SATA port associated with target SATA PHY.
1531 *
1532 * This function resets the SATA bus, and then probes
1533 * the bus for devices.
1534 *
1535 * LOCKING:
1536 * PCI/etc. bus probe sem.
1537 *
1538 */
1539 void sata_phy_reset(struct ata_port *ap)
1540 {
1541 __sata_phy_reset(ap);
1542 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1543 return;
1544 ata_bus_reset(ap);
1545 }
1546
1547 /**
1548 * ata_dev_pair - return other device on cable
1549 * @ap: port
1550 * @adev: device
1551 *
1552 * Obtain the other device on the same cable, or if none is
1553 * present, NULL is returned
1554 */
1555
1556 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1557 {
1558 struct ata_device *pair = &ap->device[1 - adev->devno];
1559 if (!ata_dev_present(pair))
1560 return NULL;
1561 return pair;
1562 }
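
/*
 * Example (illustrative): chipsets that share one set of timing
 * registers per cable can use ata_dev_pair(ap, adev) while setting
 * modes, so the timing chosen for @adev also suits its companion
 * device, if present.
 */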
1563
1564 /**
1565 * ata_port_disable - Disable port.
1566 * @ap: Port to be disabled.
1567 *
1568 * Modify @ap data structure such that the system
1569 * thinks that the entire port is disabled, and should
1570 * never attempt to probe or communicate with devices
1571 * on this port.
1572 *
1573 * LOCKING: host_set lock, or some other form of
1574 * serialization.
1575 */
1576
1577 void ata_port_disable(struct ata_port *ap)
1578 {
1579 ap->device[0].class = ATA_DEV_NONE;
1580 ap->device[1].class = ATA_DEV_NONE;
1581 ap->flags |= ATA_FLAG_PORT_DISABLED;
1582 }
1583
1584 /*
1585 * This mode timing computation functionality is ported over from
1586 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1587 */
1588 /*
1589 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1590 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1591 * for PIO 5, which is a nonstandard extension and UDMA6, which
1592 * is currently supported only by Maxtor drives.
1593 */
1594
1595 static const struct ata_timing ata_timing[] = {
1596
1597 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1598 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1599 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1600 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1601
1602 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1603 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1604 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1605
1606 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1607
1608 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1609 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1610 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1611
1612 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1613 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1614 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1615
1616 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1617 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1618 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1619
1620 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1621 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1622 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1623
1624 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1625
1626 { 0xFF }
1627 };
1628
1629 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1630 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1631
1632 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1633 {
1634 q->setup = EZ(t->setup * 1000, T);
1635 q->act8b = EZ(t->act8b * 1000, T);
1636 q->rec8b = EZ(t->rec8b * 1000, T);
1637 q->cyc8b = EZ(t->cyc8b * 1000, T);
1638 q->active = EZ(t->active * 1000, T);
1639 q->recover = EZ(t->recover * 1000, T);
1640 q->cycle = EZ(t->cycle * 1000, T);
1641 q->udma = EZ(t->udma * 1000, UT);
1642 }
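
/*
 * Worked example (illustrative): a caller on a 33 MHz bus would pass
 * T = 1000000000 / 33333, the clock period in picoseconds (~30000).
 * Quantizing the PIO4 setup time of 25ns then gives
 * EZ(25 * 1000, 30000) == 1 clock, and the 70ns act8b time gives
 * EZ(70 * 1000, 30000) == 3 clocks (cycle counts round up).
 */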
1643
1644 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1645 struct ata_timing *m, unsigned int what)
1646 {
1647 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1648 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1649 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1650 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1651 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1652 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1653 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1654 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1655 }
1656
1657 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1658 {
1659 const struct ata_timing *t;
1660
1661 for (t = ata_timing; t->mode != speed; t++)
1662 if (t->mode == 0xFF)
1663 return NULL;
1664 return t;
1665 }
1666
1667 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1668 struct ata_timing *t, int T, int UT)
1669 {
1670 const struct ata_timing *s;
1671 struct ata_timing p;
1672
1673 /*
1674 * Find the mode.
1675 */
1676
1677 if (!(s = ata_timing_find_mode(speed)))
1678 return -EINVAL;
1679
1680 memcpy(t, s, sizeof(*s));
1681
1682 /*
1683 * If the drive is an EIDE drive, it can tell us it needs extended
1684 * PIO/MW_DMA cycle timing.
1685 */
1686
1687 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1688 memset(&p, 0, sizeof(p));
1689 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1690 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1691 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1692 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1693 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1694 }
1695 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1696 }
1697
1698 /*
1699 * Convert the timing to bus clock counts.
1700 */
1701
1702 ata_timing_quantize(t, t, T, UT);
1703
1704 /*
1705 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1706 * S.M.A.R.T. and some other commands. We have to ensure that the
1707 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1708 */
1709
1710 if (speed > XFER_PIO_4) {
1711 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1712 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1713 }
1714
1715 /*
1716 * Lengthen active & recovery time so that cycle time is correct.
1717 */
1718
1719 if (t->act8b + t->rec8b < t->cyc8b) {
1720 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1721 t->rec8b = t->cyc8b - t->act8b;
1722 }
1723
1724 if (t->active + t->recover < t->cycle) {
1725 t->active += (t->cycle - (t->active + t->recover)) / 2;
1726 t->recover = t->cycle - t->active;
1727 }
1728
1729 return 0;
1730 }
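
/*
 * Sketch of a hypothetical ->set_piomode hook built on this helper
 * (all foo_* names are made up for illustration):
 *
 *	static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *		int T = 1000000000 / 33333;	// 33 MHz clock period, in ps
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) == 0)
 *			foo_program_timings(ap, adev->devno, &t);
 *	}
 */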
1731
1732 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1733 {
1734 unsigned int err_mask;
1735 int rc;
1736
1737 if (dev->xfer_shift == ATA_SHIFT_PIO)
1738 dev->flags |= ATA_DFLAG_PIO;
1739
1740 err_mask = ata_dev_set_xfermode(ap, dev);
1741 if (err_mask) {
1742 printk(KERN_ERR
1743 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1744 ap->id, err_mask);
1745 return -EIO;
1746 }
1747
1748 rc = ata_dev_revalidate(ap, dev, 0);
1749 if (rc) {
1750 printk(KERN_ERR
1751 "ata%u: failed to revalidate after set xfermode\n",
1752 ap->id);
1753 return rc;
1754 }
1755
1756 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1757 dev->xfer_shift, (int)dev->xfer_mode);
1758
1759 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1760 ap->id, dev->devno,
1761 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1762 return 0;
1763 }
1764
1765 static int ata_host_set_pio(struct ata_port *ap)
1766 {
1767 int i;
1768
1769 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1770 struct ata_device *dev = &ap->device[i];
1771
1772 if (!ata_dev_present(dev))
1773 continue;
1774
1775 if (!dev->pio_mode) {
1776 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1777 return -1;
1778 }
1779
1780 dev->xfer_mode = dev->pio_mode;
1781 dev->xfer_shift = ATA_SHIFT_PIO;
1782 if (ap->ops->set_piomode)
1783 ap->ops->set_piomode(ap, dev);
1784 }
1785
1786 return 0;
1787 }
1788
1789 static void ata_host_set_dma(struct ata_port *ap)
1790 {
1791 int i;
1792
1793 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1794 struct ata_device *dev = &ap->device[i];
1795
1796 if (!ata_dev_present(dev) || !dev->dma_mode)
1797 continue;
1798
1799 dev->xfer_mode = dev->dma_mode;
1800 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1801 if (ap->ops->set_dmamode)
1802 ap->ops->set_dmamode(ap, dev);
1803 }
1804 }
1805
1806 /**
1807 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1808 * @ap: port on which timings will be programmed
1809 *
1810 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1811 *
1812 * LOCKING:
1813 * PCI/etc. bus probe sem.
1814 */
1815 static void ata_set_mode(struct ata_port *ap)
1816 {
1817 int i, rc;
1818
1819 /* step 1: calculate xfer_mask */
1820 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1821 struct ata_device *dev = &ap->device[i];
1822 unsigned int pio_mask, dma_mask;
1823
1824 if (!ata_dev_present(dev))
1825 continue;
1826
1827 ata_dev_xfermask(ap, dev);
1828
1829 /* TODO: let LLDD filter dev->*_mask here */
1830
1831 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1832 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1833 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1834 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1835 }
1836
1837 /* step 2: always set host PIO timings */
1838 rc = ata_host_set_pio(ap);
1839 if (rc)
1840 goto err_out;
1841
1842 /* step 3: set host DMA timings */
1843 ata_host_set_dma(ap);
1844
1845 /* step 4: update devices' xfer mode */
1846 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1847 struct ata_device *dev = &ap->device[i];
1848
1849 if (!ata_dev_present(dev))
1850 continue;
1851
1852 if (ata_dev_set_mode(ap, dev))
1853 goto err_out;
1854 }
1855
1856 if (ap->ops->post_set_mode)
1857 ap->ops->post_set_mode(ap);
1858
1859 return;
1860
1861 err_out:
1862 ata_port_disable(ap);
1863 }
1864
1865 /**
1866 * ata_tf_to_host - issue ATA taskfile to host controller
1867 * @ap: port to which command is being issued
1868 * @tf: ATA taskfile register set
1869 *
1870 * Issues ATA taskfile register set to ATA host controller,
1871 * with proper synchronization with interrupt handler and
1872 * other threads.
1873 *
1874 * LOCKING:
1875 * spin_lock_irqsave(host_set lock)
1876 */
1877
1878 static inline void ata_tf_to_host(struct ata_port *ap,
1879 const struct ata_taskfile *tf)
1880 {
1881 ap->ops->tf_load(ap, tf);
1882 ap->ops->exec_command(ap, tf);
1883 }
1884
1885 /**
1886 * ata_busy_sleep - sleep until BSY clears, or timeout
1887 * @ap: port containing status register to be polled
1888 * @tmout_pat: impatience timeout
1889 * @tmout: overall timeout
1890 *
1891 * Sleep until ATA Status register bit BSY clears,
1892 * or a timeout occurs.
1893 *
1894 * LOCKING: None.
1895 */
1896
1897 unsigned int ata_busy_sleep(struct ata_port *ap,
1898 unsigned long tmout_pat, unsigned long tmout)
1899 {
1900 unsigned long timer_start, timeout;
1901 u8 status;
1902
1903 status = ata_busy_wait(ap, ATA_BUSY, 300);
1904 timer_start = jiffies;
1905 timeout = timer_start + tmout_pat;
1906 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1907 msleep(50);
1908 status = ata_busy_wait(ap, ATA_BUSY, 3);
1909 }
1910
1911 if (status & ATA_BUSY)
1912 printk(KERN_WARNING "ata%u is slow to respond, "
1913 "please be patient\n", ap->id);
1914
1915 timeout = timer_start + tmout;
1916 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1917 msleep(50);
1918 status = ata_chk_status(ap);
1919 }
1920
1921 if (status & ATA_BUSY) {
1922 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1923 ap->id, tmout / HZ);
1924 return 1;
1925 }
1926
1927 return 0;
1928 }
1929
1930 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1931 {
1932 struct ata_ioports *ioaddr = &ap->ioaddr;
1933 unsigned int dev0 = devmask & (1 << 0);
1934 unsigned int dev1 = devmask & (1 << 1);
1935 unsigned long timeout;
1936
1937 /* if device 0 was found in ata_devchk, wait for its
1938 * BSY bit to clear
1939 */
1940 if (dev0)
1941 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1942
1943 /* if device 1 was found in ata_devchk, wait for
1944 * register access, then wait for BSY to clear
1945 */
1946 timeout = jiffies + ATA_TMOUT_BOOT;
1947 while (dev1) {
1948 u8 nsect, lbal;
1949
1950 ap->ops->dev_select(ap, 1);
1951 if (ap->flags & ATA_FLAG_MMIO) {
1952 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1953 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1954 } else {
1955 nsect = inb(ioaddr->nsect_addr);
1956 lbal = inb(ioaddr->lbal_addr);
1957 }
1958 if ((nsect == 1) && (lbal == 1))
1959 break;
1960 if (time_after(jiffies, timeout)) {
1961 dev1 = 0;
1962 break;
1963 }
1964 msleep(50); /* give drive a breather */
1965 }
1966 if (dev1)
1967 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1968
1969 /* is all this really necessary? */
1970 ap->ops->dev_select(ap, 0);
1971 if (dev1)
1972 ap->ops->dev_select(ap, 1);
1973 if (dev0)
1974 ap->ops->dev_select(ap, 0);
1975 }
1976
1977 static unsigned int ata_bus_softreset(struct ata_port *ap,
1978 unsigned int devmask)
1979 {
1980 struct ata_ioports *ioaddr = &ap->ioaddr;
1981
1982 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1983
1984 /* software reset. causes dev0 to be selected */
1985 if (ap->flags & ATA_FLAG_MMIO) {
1986 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1987 udelay(20); /* FIXME: flush */
1988 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1989 udelay(20); /* FIXME: flush */
1990 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1991 } else {
1992 outb(ap->ctl, ioaddr->ctl_addr);
1993 udelay(10);
1994 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1995 udelay(10);
1996 outb(ap->ctl, ioaddr->ctl_addr);
1997 }
1998
1999 /* spec mandates ">= 2ms" before checking status.
2000 * We wait 150ms, because that was the magic delay used for
2001 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2002 * between when the ATA command register is written, and then
2003 * status is checked. Because waiting for "a while" before
2004 * checking status is fine, post SRST, we perform this magic
2005 * delay here as well.
2006 *
2007 * Old drivers/ide uses the 2ms rule and then waits for ready
2008 */
2009 msleep(150);
2010
2011 /* Before we perform post reset processing we want to see if
2012 * the bus shows 0xFF because the odd clown forgets the D7
2013 * pulldown resistor.
2014 */
2015 if (ata_check_status(ap) == 0xFF)
2016 return AC_ERR_OTHER;
2017
2018 ata_bus_post_reset(ap, devmask);
2019
2020 return 0;
2021 }
2022
2023 /**
2024 * ata_bus_reset - reset host port and associated ATA channel
2025 * @ap: port to reset
2026 *
2027 * This is typically the first time we actually start issuing
2028 * commands to the ATA channel. We wait for BSY to clear, then
2029 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2030 * result. Determine what devices, if any, are on the channel
2031 * by looking at the device 0/1 error register. Look at the signature
2032 * stored in each device's taskfile registers, to determine if
2033 * the device is ATA or ATAPI.
2034 *
2035 * LOCKING:
2036 * PCI/etc. bus probe sem.
2037 * Obtains host_set lock.
2038 *
2039 * SIDE EFFECTS:
2040 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2041 */
2042
2043 void ata_bus_reset(struct ata_port *ap)
2044 {
2045 struct ata_ioports *ioaddr = &ap->ioaddr;
2046 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2047 u8 err;
2048 unsigned int dev0, dev1 = 0, devmask = 0;
2049
2050 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2051
2052 /* determine if device 0/1 are present */
2053 if (ap->flags & ATA_FLAG_SATA_RESET)
2054 dev0 = 1;
2055 else {
2056 dev0 = ata_devchk(ap, 0);
2057 if (slave_possible)
2058 dev1 = ata_devchk(ap, 1);
2059 }
2060
2061 if (dev0)
2062 devmask |= (1 << 0);
2063 if (dev1)
2064 devmask |= (1 << 1);
2065
2066 /* select device 0 again */
2067 ap->ops->dev_select(ap, 0);
2068
2069 /* issue bus reset */
2070 if (ap->flags & ATA_FLAG_SRST)
2071 if (ata_bus_softreset(ap, devmask))
2072 goto err_out;
2073
2074 /*
2075 * determine by signature whether we have ATA or ATAPI devices
2076 */
2077 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2078 if ((slave_possible) && (err != 0x81))
2079 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2080
2081 /* re-enable interrupts */
2082 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2083 ata_irq_on(ap);
2084
2085 /* is double-select really necessary? */
2086 if (ap->device[1].class != ATA_DEV_NONE)
2087 ap->ops->dev_select(ap, 1);
2088 if (ap->device[0].class != ATA_DEV_NONE)
2089 ap->ops->dev_select(ap, 0);
2090
2091 /* if no devices were detected, disable this port */
2092 if ((ap->device[0].class == ATA_DEV_NONE) &&
2093 (ap->device[1].class == ATA_DEV_NONE))
2094 goto err_out;
2095
2096 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2097 /* set up device control for ATA_FLAG_SATA_RESET */
2098 if (ap->flags & ATA_FLAG_MMIO)
2099 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2100 else
2101 outb(ap->ctl, ioaddr->ctl_addr);
2102 }
2103
2104 DPRINTK("EXIT\n");
2105 return;
2106
2107 err_out:
2108 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2109 ap->ops->port_disable(ap);
2110
2111 DPRINTK("EXIT\n");
2112 }
2113
2114 static int sata_phy_resume(struct ata_port *ap)
2115 {
2116 unsigned long timeout = jiffies + (HZ * 5);
2117 u32 sstatus;
2118
2119 scr_write_flush(ap, SCR_CONTROL, 0x300);
2120
2121 /* Wait for phy to become ready, if necessary. */
2122 do {
2123 msleep(200);
2124 sstatus = scr_read(ap, SCR_STATUS);
2125 if ((sstatus & 0xf) != 1)
2126 return 0;
2127 } while (time_before(jiffies, timeout));
2128
2129 return -1;
2130 }
2131
2132 /**
2133 * ata_std_probeinit - initialize probing
2134 * @ap: port to be probed
2135 *
2136 * @ap is about to be probed. Initialize it. This function is
2137 * to be used as standard callback for ata_drive_probe_reset().
2138 *
2139 * NOTE!!! Do not use this function as probeinit if a low level
2140 * driver implements only hardreset. Just pass NULL as probeinit
2141 * in that case. Using this function is probably okay but doing
2142 * so makes the reset sequence different from the original
2143 * ->phy_reset implementation and Jeff nervous. :-P
2144 */
2145 void ata_std_probeinit(struct ata_port *ap)
2146 {
2147 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2148 sata_phy_resume(ap);
2149 if (sata_dev_present(ap))
2150 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2151 }
2152 }
2153
2154 /**
2155 * ata_std_softreset - reset host port via ATA SRST
2156 * @ap: port to reset
2157 * @verbose: fail verbosely
2158 * @classes: resulting classes of attached devices
2159 *
2160 * Reset host port using ATA SRST. This function is to be used
2161 * as standard callback for ata_drive_*_reset() functions.
2162 *
2163 * LOCKING:
2164 * Kernel thread context (may sleep)
2165 *
2166 * RETURNS:
2167 * 0 on success, -errno otherwise.
2168 */
2169 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2170 {
2171 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2172 unsigned int devmask = 0, err_mask;
2173 u8 err;
2174
2175 DPRINTK("ENTER\n");
2176
2177 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2178 classes[0] = ATA_DEV_NONE;
2179 goto out;
2180 }
2181
2182 /* determine if device 0/1 are present */
2183 if (ata_devchk(ap, 0))
2184 devmask |= (1 << 0);
2185 if (slave_possible && ata_devchk(ap, 1))
2186 devmask |= (1 << 1);
2187
2188 /* select device 0 again */
2189 ap->ops->dev_select(ap, 0);
2190
2191 /* issue bus reset */
2192 DPRINTK("about to softreset, devmask=%x\n", devmask);
2193 err_mask = ata_bus_softreset(ap, devmask);
2194 if (err_mask) {
2195 if (verbose)
2196 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2197 ap->id, err_mask);
2198 else
2199 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2200 err_mask);
2201 return -EIO;
2202 }
2203
2204 /* determine by signature whether we have ATA or ATAPI devices */
2205 classes[0] = ata_dev_try_classify(ap, 0, &err);
2206 if (slave_possible && err != 0x81)
2207 classes[1] = ata_dev_try_classify(ap, 1, &err);
2208
2209 out:
2210 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2211 return 0;
2212 }
2213
2214 /**
2215 * sata_std_hardreset - reset host port via SATA phy reset
2216 * @ap: port to reset
2217 * @verbose: fail verbosely
2218 * @class: resulting class of attached device
2219 *
2220 * SATA phy-reset host port using DET bits of SControl register.
2221 * This function is to be used as standard callback for
2222 * ata_drive_*_reset().
2223 *
2224 * LOCKING:
2225 * Kernel thread context (may sleep)
2226 *
2227 * RETURNS:
2228 * 0 on success, -errno otherwise.
2229 */
2230 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2231 {
2232 DPRINTK("ENTER\n");
2233
2234 /* Issue phy wake/reset */
2235 scr_write_flush(ap, SCR_CONTROL, 0x301);
2236
2237 /*
2238 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2239 * 10.4.2 says at least 1 ms.
2240 */
2241 msleep(1);
2242
2243 /* Bring phy back */
2244 sata_phy_resume(ap);
2245
2246 /* TODO: phy layer with polling, timeouts, etc. */
2247 if (!sata_dev_present(ap)) {
2248 *class = ATA_DEV_NONE;
2249 DPRINTK("EXIT, link offline\n");
2250 return 0;
2251 }
2252
2253 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2254 if (verbose)
2255 printk(KERN_ERR "ata%u: COMRESET failed "
2256 "(device not ready)\n", ap->id);
2257 else
2258 DPRINTK("EXIT, device not ready\n");
2259 return -EIO;
2260 }
2261
2262 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2263
2264 *class = ata_dev_try_classify(ap, 0, NULL);
2265
2266 DPRINTK("EXIT, class=%u\n", *class);
2267 return 0;
2268 }
2269
2270 /**
2271 * ata_std_postreset - standard postreset callback
2272 * @ap: the target ata_port
2273 * @classes: classes of attached devices
2274 *
2275 * This function is invoked after a successful reset. Note that
2276 * the device might have been reset more than once using
2277 * different reset methods before postreset is invoked.
2278 *
2279 * This function is to be used as standard callback for
2280 * ata_drive_*_reset().
2281 *
2282 * LOCKING:
2283 * Kernel thread context (may sleep)
2284 */
2285 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2286 {
2287 DPRINTK("ENTER\n");
2288
2289 /* set cable type if it isn't already set */
2290 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2291 ap->cbl = ATA_CBL_SATA;
2292
2293 /* print link status */
2294 if (ap->cbl == ATA_CBL_SATA)
2295 sata_print_link_status(ap);
2296
2297 /* re-enable interrupts */
2298 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2299 ata_irq_on(ap);
2300
2301 /* is double-select really necessary? */
2302 if (classes[0] != ATA_DEV_NONE)
2303 ap->ops->dev_select(ap, 1);
2304 if (classes[1] != ATA_DEV_NONE)
2305 ap->ops->dev_select(ap, 0);
2306
2307 /* bail out if no device is present */
2308 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2309 DPRINTK("EXIT, no device\n");
2310 return;
2311 }
2312
2313 /* set up device control */
2314 if (ap->ioaddr.ctl_addr) {
2315 if (ap->flags & ATA_FLAG_MMIO)
2316 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2317 else
2318 outb(ap->ctl, ap->ioaddr.ctl_addr);
2319 }
2320
2321 DPRINTK("EXIT\n");
2322 }
2323
2324 /**
2325 * ata_std_probe_reset - standard probe reset method
2326 * @ap: port to perform probe-reset
2327 * @classes: resulting classes of attached devices
2328 *
2329 * The stock off-the-shelf ->probe_reset method.
2330 *
2331 * LOCKING:
2332 * Kernel thread context (may sleep)
2333 *
2334 * RETURNS:
2335 * 0 on success, -errno otherwise.
2336 */
2337 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2338 {
2339 ata_reset_fn_t hardreset;
2340
2341 hardreset = NULL;
2342 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2343 hardreset = sata_std_hardreset;
2344
2345 return ata_drive_probe_reset(ap, ata_std_probeinit,
2346 ata_std_softreset, hardreset,
2347 ata_std_postreset, classes);
2348 }
2349
2350 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2351 ata_postreset_fn_t postreset,
2352 unsigned int *classes)
2353 {
2354 int i, rc;
2355
2356 for (i = 0; i < ATA_MAX_DEVICES; i++)
2357 classes[i] = ATA_DEV_UNKNOWN;
2358
2359 rc = reset(ap, 0, classes);
2360 if (rc)
2361 return rc;
2362
2363 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2364 * is complete and convert all ATA_DEV_UNKNOWN to
2365 * ATA_DEV_NONE.
2366 */
2367 for (i = 0; i < ATA_MAX_DEVICES; i++)
2368 if (classes[i] != ATA_DEV_UNKNOWN)
2369 break;
2370
2371 if (i < ATA_MAX_DEVICES)
2372 for (i = 0; i < ATA_MAX_DEVICES; i++)
2373 if (classes[i] == ATA_DEV_UNKNOWN)
2374 classes[i] = ATA_DEV_NONE;
2375
2376 if (postreset)
2377 postreset(ap, classes);
2378
2379 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2380 }
2381
2382 /**
2383 * ata_drive_probe_reset - Perform probe reset with given methods
2384 * @ap: port to reset
2385 * @probeinit: probeinit method (can be NULL)
2386 * @softreset: softreset method (can be NULL)
2387 * @hardreset: hardreset method (can be NULL)
2388 * @postreset: postreset method (can be NULL)
2389 * @classes: resulting classes of attached devices
2390 *
2391 * Reset the specified port and classify attached devices using
2392 * given methods. This function prefers softreset but tries all
2393 * possible reset sequences to reset and classify devices. This
2394 * function is intended to be used for constructing ->probe_reset
2395 * callback by low level drivers.
2396 *
2397 * Reset methods should follow the following rules.
2398 *
2399 * - Return 0 on success, -errno on failure.
2400 * - If classification is supported, fill classes[] with
2401 * recognized class codes.
2402 * - If classification is not supported, leave classes[] alone.
2403 * - If verbose is non-zero, print error message on failure;
2404 * otherwise, shut up.
2405 *
2406 * LOCKING:
2407 * Kernel thread context (may sleep)
2408 *
2409 * RETURNS:
2410 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2411 * if classification fails, and any error code from reset
2412 * methods.
2413 */
2414 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2415 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2416 ata_postreset_fn_t postreset, unsigned int *classes)
2417 {
2418 int rc = -EINVAL;
2419
2420 if (probeinit)
2421 probeinit(ap);
2422
2423 if (softreset) {
2424 rc = do_probe_reset(ap, softreset, postreset, classes);
2425 if (rc == 0)
2426 return 0;
2427 }
2428
2429 if (!hardreset)
2430 return rc;
2431
2432 rc = do_probe_reset(ap, hardreset, postreset, classes);
2433 if (rc == 0 || rc != -ENODEV)
2434 return rc;
2435
2436 if (softreset)
2437 rc = do_probe_reset(ap, softreset, postreset, classes);
2438
2439 return rc;
2440 }
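
/*
 * Hedged example (not part of this file): a PATA-style low level
 * driver that supports only SRST could construct its ->probe_reset
 * from the helpers above roughly like this.  "foo_probe_reset" is an
 * illustrative name, and passing NULL for probeinit/hardreset is an
 * assumption for a softreset-only controller; SATA drivers would
 * instead use ata_std_probe_reset() above, which wires in hardreset.
 */
static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	return ata_drive_probe_reset(ap, NULL,
				     ata_std_softreset, NULL,
				     ata_std_postreset, classes);
}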
2441
2442 /**
2443 * ata_dev_same_device - Determine whether new ID matches configured device
2444 * @ap: port on which the device to compare against resides
2445 * @dev: device to compare against
2446 * @new_class: class of the new device
2447 * @new_id: IDENTIFY page of the new device
2448 *
2449 * Compare @new_class and @new_id against @dev and determine
2450 * whether @dev is the device indicated by @new_class and
2451 * @new_id.
2452 *
2453 * LOCKING:
2454 * None.
2455 *
2456 * RETURNS:
2457 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2458 */
2459 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2460 unsigned int new_class, const u16 *new_id)
2461 {
2462 const u16 *old_id = dev->id;
2463 unsigned char model[2][41], serial[2][21];
2464 u64 new_n_sectors;
2465
2466 if (dev->class != new_class) {
2467 printk(KERN_INFO
2468 "ata%u: dev %u class mismatch %d != %d\n",
2469 ap->id, dev->devno, dev->class, new_class);
2470 return 0;
2471 }
2472
2473 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2474 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2475 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2476 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2477 new_n_sectors = ata_id_n_sectors(new_id);
2478
2479 if (strcmp(model[0], model[1])) {
2480 printk(KERN_INFO
2481 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2482 ap->id, dev->devno, model[0], model[1]);
2483 return 0;
2484 }
2485
2486 if (strcmp(serial[0], serial[1])) {
2487 printk(KERN_INFO
2488 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2489 ap->id, dev->devno, serial[0], serial[1]);
2490 return 0;
2491 }
2492
2493 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2494 printk(KERN_INFO
2495 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2496 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2497 (unsigned long long)new_n_sectors);
2498 return 0;
2499 }
2500
2501 return 1;
2502 }
2503
2504 /**
2505 * ata_dev_revalidate - Revalidate ATA device
2506 * @ap: port on which the device to revalidate resides
2507 * @dev: device to revalidate
2508 * @post_reset: is this revalidation after reset?
2509 *
2510 * Re-read IDENTIFY page and make sure @dev is still attached to
2511 * the port.
2512 *
2513 * LOCKING:
2514 * Kernel thread context (may sleep)
2515 *
2516 * RETURNS:
2517 * 0 on success, negative errno otherwise
2518 */
2519 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2520 int post_reset)
2521 {
2522 unsigned int class;
2523 u16 *id;
2524 int rc;
2525
2526 if (!ata_dev_present(dev))
2527 return -ENODEV;
2528
2529 class = dev->class;
2530 id = NULL;
2531
2532 /* allocate & read ID data */
2533 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2534 if (rc)
2535 goto fail;
2536
2537 /* is the device still there? */
2538 if (!ata_dev_same_device(ap, dev, class, id)) {
2539 rc = -ENODEV;
2540 goto fail;
2541 }
2542
2543 kfree(dev->id);
2544 dev->id = id;
2545
2546 /* configure device according to the new ID */
2547 return ata_dev_configure(ap, dev, 0);
2548
2549 fail:
2550 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2551 ap->id, dev->devno, rc);
2552 kfree(id);
2553 return rc;
2554 }
2555
2556 static const char * const ata_dma_blacklist[] = {
2557 "WDC AC11000H", NULL,
2558 "WDC AC22100H", NULL,
2559 "WDC AC32500H", NULL,
2560 "WDC AC33100H", NULL,
2561 "WDC AC31600H", NULL,
2562 "WDC AC32100H", "24.09P07",
2563 "WDC AC23200L", "21.10N21",
2564 "Compaq CRD-8241B", NULL,
2565 "CRD-8400B", NULL,
2566 "CRD-8480B", NULL,
2567 "CRD-8482B", NULL,
2568 "CRD-84", NULL,
2569 "SanDisk SDP3B", NULL,
2570 "SanDisk SDP3B-64", NULL,
2571 "SANYO CD-ROM CRD", NULL,
2572 "HITACHI CDR-8", NULL,
2573 "HITACHI CDR-8335", NULL,
2574 "HITACHI CDR-8435", NULL,
2575 "Toshiba CD-ROM XM-6202B", NULL,
2576 "TOSHIBA CD-ROM XM-1702BC", NULL,
2577 "CD-532E-A", NULL,
2578 "E-IDE CD-ROM CR-840", NULL,
2579 "CD-ROM Drive/F5A", NULL,
2580 "WPI CDD-820", NULL,
2581 "SAMSUNG CD-ROM SC-148C", NULL,
2582 "SAMSUNG CD-ROM SC", NULL,
2583 "SanDisk SDP3B-64", NULL,
2584 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2585 "_NEC DV5800A", NULL,
2586 "SAMSUNG CD-ROM SN-124", "N001"
2587 };
2588
2589 static int ata_strim(char *s, size_t len)
2590 {
2591 len = strnlen(s, len);
2592
2593 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2594 while ((len > 0) && (s[len - 1] == ' ')) {
2595 len--;
2596 s[len] = 0;
2597 }
2598 return len;
2599 }
2600
2601 static int ata_dma_blacklisted(const struct ata_device *dev)
2602 {
2603 unsigned char model_num[40];
2604 unsigned char model_rev[16];
2605 unsigned int nlen, rlen;
2606 int i;
2607
2608 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2609 sizeof(model_num));
2610 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2611 sizeof(model_rev));
2612 nlen = ata_strim(model_num, sizeof(model_num));
2613 rlen = ata_strim(model_rev, sizeof(model_rev));
2614
2615 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2616 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2617 if (ata_dma_blacklist[i+1] == NULL)
2618 return 1;
2619 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2620 return 1;
2621 }
2622 }
2623 return 0;
2624 }
2625
2626 /**
2627 * ata_dev_xfermask - Compute supported xfermask of the given device
2628 * @ap: Port on which the device to compute xfermask for resides
2629 * @dev: Device to compute xfermask for
2630 *
2631 * Compute supported xfermask of @dev and store it in
2632 * dev->*_mask. This function is responsible for applying all
2633 * known limits including host controller limits, device
2634 * blacklist, etc...
2635 *
2636 * FIXME: The current implementation limits all transfer modes to
2637 * the fastest of the slowest device on the port. This is not
2638 * required on most controllers.
2639 *
2640 * LOCKING:
2641 * None.
2642 */
2643 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2644 {
2645 unsigned long xfer_mask;
2646 int i;
2647
2648 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2649 ap->udma_mask);
2650
2651 /* use port-wide xfermask for now */
2652 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2653 struct ata_device *d = &ap->device[i];
2654 if (!ata_dev_present(d))
2655 continue;
2656 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2657 d->udma_mask);
2658 xfer_mask &= ata_id_xfermask(d->id);
2659 if (ata_dma_blacklisted(d))
2660 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2661 }
2662
2663 if (ata_dma_blacklisted(dev))
2664 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2665 "disabling DMA\n", ap->id, dev->devno);
2666
2667 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2668 &dev->udma_mask);
2669 }
2670
2671 /**
2672 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2673 * @ap: Port associated with device @dev
2674 * @dev: Device to which command will be sent
2675 *
2676 * Issue SET FEATURES - XFER MODE command to device @dev
2677 * on port @ap.
2678 *
2679 * LOCKING:
2680 * PCI/etc. bus probe sem.
2681 *
2682 * RETURNS:
2683 * 0 on success, AC_ERR_* mask otherwise.
2684 */
2685
2686 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2687 struct ata_device *dev)
2688 {
2689 struct ata_taskfile tf;
2690 unsigned int err_mask;
2691
2692 /* set up set-features taskfile */
2693 DPRINTK("set features - xfer mode\n");
2694
2695 ata_tf_init(ap, &tf, dev->devno);
2696 tf.command = ATA_CMD_SET_FEATURES;
2697 tf.feature = SETFEATURES_XFER;
2698 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2699 tf.protocol = ATA_PROT_NODATA;
2700 tf.nsect = dev->xfer_mode;
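	/* dev->xfer_mode holds an XFER_* opcode from <linux/ata.h>,
	 * e.g. XFER_UDMA_5 (0x45) for UDMA/100 (illustrative value).
	 */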
2701
2702 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2703
2704 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2705 return err_mask;
2706 }
2707
2708 /**
2709 * ata_dev_init_params - Issue INIT DEV PARAMS command
2710 * @ap: Port associated with device @dev
2711 * @dev: Device to which command will be sent
2712 *
2713 * LOCKING:
2714 * Kernel thread context (may sleep)
2715 *
2716 * RETURNS:
2717 * 0 on success, AC_ERR_* mask otherwise.
2718 */
2719
2720 static unsigned int ata_dev_init_params(struct ata_port *ap,
2721 struct ata_device *dev)
2722 {
2723 struct ata_taskfile tf;
2724 unsigned int err_mask;
2725 u16 sectors = dev->id[6];
2726 u16 heads = dev->id[3];
2727
2728 /* Number of sectors per track 1-255. Number of heads 1-16 */
2729 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2730 return 0;
2731
2732 /* set up init dev params taskfile */
2733 DPRINTK("init dev params\n");
2734
2735 ata_tf_init(ap, &tf, dev->devno);
2736 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2737 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2738 tf.protocol = ATA_PROT_NODATA;
2739 tf.nsect = sectors;
2740 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2741
2742 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2743
2744 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2745 return err_mask;
2746 }
2747
2748 /**
2749 * ata_sg_clean - Unmap DMA memory associated with command
2750 * @qc: Command containing DMA memory to be released
2751 *
2752 * Unmap all mapped DMA memory associated with this command.
2753 *
2754 * LOCKING:
2755 * spin_lock_irqsave(host_set lock)
2756 */
2757
2758 static void ata_sg_clean(struct ata_queued_cmd *qc)
2759 {
2760 struct ata_port *ap = qc->ap;
2761 struct scatterlist *sg = qc->__sg;
2762 int dir = qc->dma_dir;
2763 void *pad_buf = NULL;
2764
2765 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2766 WARN_ON(sg == NULL);
2767
2768 if (qc->flags & ATA_QCFLAG_SINGLE)
2769 WARN_ON(qc->n_elem > 1);
2770
2771 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2772
2773 /* if we padded the buffer out to 32-bit bound, and data
2774 * xfer direction is from-device, we must copy from the
2775 * pad buffer back into the supplied buffer
2776 */
2777 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2778 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2779
2780 if (qc->flags & ATA_QCFLAG_SG) {
2781 if (qc->n_elem)
2782 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2783 /* restore last sg */
2784 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2785 if (pad_buf) {
2786 struct scatterlist *psg = &qc->pad_sgent;
2787 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2788 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2789 kunmap_atomic(addr, KM_IRQ0);
2790 }
2791 } else {
2792 if (qc->n_elem)
2793 dma_unmap_single(ap->dev,
2794 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2795 dir);
2796 /* restore sg */
2797 sg->length += qc->pad_len;
2798 if (pad_buf)
2799 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2800 pad_buf, qc->pad_len);
2801 }
2802
2803 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2804 qc->__sg = NULL;
2805 }
2806
2807 /**
2808 * ata_fill_sg - Fill PCI IDE PRD table
2809 * @qc: Metadata associated with taskfile to be transferred
2810 *
2811 * Fill PCI IDE PRD (scatter-gather) table with segments
2812 * associated with the current disk command.
2813 *
2814 * LOCKING:
2815 * spin_lock_irqsave(host_set lock)
2816 *
2817 */
2818 static void ata_fill_sg(struct ata_queued_cmd *qc)
2819 {
2820 struct ata_port *ap = qc->ap;
2821 struct scatterlist *sg;
2822 unsigned int idx;
2823
2824 WARN_ON(qc->__sg == NULL);
2825 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2826
2827 idx = 0;
2828 ata_for_each_sg(sg, qc) {
2829 u32 addr, offset;
2830 u32 sg_len, len;
2831
2832 /* determine if physical DMA addr spans 64K boundary.
2833 * Note h/w doesn't support 64-bit, so we unconditionally
2834 * truncate dma_addr_t to u32.
2835 */
2836 addr = (u32) sg_dma_address(sg);
2837 sg_len = sg_dma_len(sg);
2838
2839 while (sg_len) {
2840 offset = addr & 0xffff;
2841 len = sg_len;
2842 if ((offset + sg_len) > 0x10000)
2843 len = 0x10000 - offset;
2844
2845 ap->prd[idx].addr = cpu_to_le32(addr);
2846 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2847 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2848
2849 idx++;
2850 sg_len -= len;
2851 addr += len;
2852 }
2853 }
2854
2855 if (idx)
2856 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2857 }
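
/*
 * Worked example of the boundary split above (illustrative numbers):
 * a segment of 0x18000 bytes at bus address 0x1f000 is emitted as
 * three PRDs: (0x1f000, 0x1000), (0x20000, 0x10000), (0x30000, 0x7000).
 * Note the 0x10000-byte entry is stored with a length field of 0,
 * since only the low 16 bits are kept and 0 means 64KiB to the
 * controller.
 */
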
2858 /**
2859 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2860 * @qc: Metadata associated with taskfile to check
2861 *
2862 * Allow low-level driver to filter ATA PACKET commands, returning
2863 * a status indicating whether or not it is OK to use DMA for the
2864 * supplied PACKET command.
2865 *
2866 * LOCKING:
2867 * spin_lock_irqsave(host_set lock)
2868 *
2869 * RETURNS: 0 when ATAPI DMA can be used
2870 * nonzero otherwise
2871 */
2872 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2873 {
2874 struct ata_port *ap = qc->ap;
2875 int rc = 0; /* Assume ATAPI DMA is OK by default */
2876
2877 if (ap->ops->check_atapi_dma)
2878 rc = ap->ops->check_atapi_dma(qc);
2879
2880 return rc;
2881 }
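
/*
 * Hedged sketch of a ->check_atapi_dma hook a low level driver might
 * install; the 6-byte-CDB policy below is purely illustrative, not a
 * rule taken from any in-tree driver.
 */
static int foo_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* nonzero tells libata to fall back to PIO for this command */
	return qc->dev->cdb_len <= 6;
}
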
2882 /**
2883 * ata_qc_prep - Prepare taskfile for submission
2884 * @qc: Metadata associated with taskfile to be prepared
2885 *
2886 * Prepare ATA taskfile for submission.
2887 *
2888 * LOCKING:
2889 * spin_lock_irqsave(host_set lock)
2890 */
2891 void ata_qc_prep(struct ata_queued_cmd *qc)
2892 {
2893 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2894 return;
2895
2896 ata_fill_sg(qc);
2897 }
2898
2899 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2900
2901 /**
2902 * ata_sg_init_one - Associate command with memory buffer
2903 * @qc: Command to be associated
2904 * @buf: Memory buffer
2905 * @buflen: Length of memory buffer, in bytes.
2906 *
2907 * Initialize the data-related elements of queued_cmd @qc
2908 * to point to a single memory buffer, @buf of byte length @buflen.
2909 *
2910 * LOCKING:
2911 * spin_lock_irqsave(host_set lock)
2912 */
2913
2914 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2915 {
2916 struct scatterlist *sg;
2917
2918 qc->flags |= ATA_QCFLAG_SINGLE;
2919
2920 memset(&qc->sgent, 0, sizeof(qc->sgent));
2921 qc->__sg = &qc->sgent;
2922 qc->n_elem = 1;
2923 qc->orig_n_elem = 1;
2924 qc->buf_virt = buf;
2925
2926 sg = qc->__sg;
2927 sg_init_one(sg, buf, buflen);
2928 }
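
/*
 * Hedged usage sketch: an internal IDENTIFY-style command with one
 * flat kernel buffer would be associated roughly like this before
 * issue (names are illustrative; ata_exec_internal() is the real
 * in-tree user of this helper).
 */
static void foo_attach_id_buf(struct ata_queued_cmd *qc, u16 *id)
{
	qc->dma_dir = DMA_FROM_DEVICE;
	ata_sg_init_one(qc, id, ATA_ID_WORDS * sizeof(u16));
}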
2929
2930 /**
2931 * ata_sg_init - Associate command with scatter-gather table.
2932 * @qc: Command to be associated
2933 * @sg: Scatter-gather table.
2934 * @n_elem: Number of elements in s/g table.
2935 *
2936 * Initialize the data-related elements of queued_cmd @qc
2937 * to point to a scatter-gather table @sg, containing @n_elem
2938 * elements.
2939 *
2940 * LOCKING:
2941 * spin_lock_irqsave(host_set lock)
2942 */
2943
2944 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2945 unsigned int n_elem)
2946 {
2947 qc->flags |= ATA_QCFLAG_SG;
2948 qc->__sg = sg;
2949 qc->n_elem = n_elem;
2950 qc->orig_n_elem = n_elem;
2951 }
2952
2953 /**
2954 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2955 * @qc: Command with memory buffer to be mapped.
2956 *
2957 * DMA-map the memory buffer associated with queued_cmd @qc.
2958 *
2959 * LOCKING:
2960 * spin_lock_irqsave(host_set lock)
2961 *
2962 * RETURNS:
2963 * Zero on success, negative on error.
2964 */
2965
2966 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2967 {
2968 struct ata_port *ap = qc->ap;
2969 int dir = qc->dma_dir;
2970 struct scatterlist *sg = qc->__sg;
2971 dma_addr_t dma_address;
2972 int trim_sg = 0;
2973
2974 /* we must lengthen transfers to end on a 32-bit boundary */
2975 qc->pad_len = sg->length & 3;
2976 if (qc->pad_len) {
2977 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2978 struct scatterlist *psg = &qc->pad_sgent;
2979
2980 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2981
2982 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2983
2984 if (qc->tf.flags & ATA_TFLAG_WRITE)
2985 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2986 qc->pad_len);
2987
2988 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2989 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2990 /* trim sg */
2991 sg->length -= qc->pad_len;
2992 if (sg->length == 0)
2993 trim_sg = 1;
2994
2995 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2996 sg->length, qc->pad_len);
2997 }
2998
2999 if (trim_sg) {
3000 qc->n_elem--;
3001 goto skip_map;
3002 }
3003
3004 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3005 sg->length, dir);
3006 if (dma_mapping_error(dma_address)) {
3007 /* restore sg */
3008 sg->length += qc->pad_len;
3009 return -1;
3010 }
3011
3012 sg_dma_address(sg) = dma_address;
3013 sg_dma_len(sg) = sg->length;
3014
3015 skip_map:
3016 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3017 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3018
3019 return 0;
3020 }
3021
3022 /**
3023 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3024 * @qc: Command with scatter-gather table to be mapped.
3025 *
3026 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3027 *
3028 * LOCKING:
3029 * spin_lock_irqsave(host_set lock)
3030 *
3031 * RETURNS:
3032 * Zero on success, negative on error.
3033 *
3034 */
3035
3036 static int ata_sg_setup(struct ata_queued_cmd *qc)
3037 {
3038 struct ata_port *ap = qc->ap;
3039 struct scatterlist *sg = qc->__sg;
3040 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3041 int n_elem, pre_n_elem, dir, trim_sg = 0;
3042
3043 VPRINTK("ENTER, ata%u\n", ap->id);
3044 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3045
3046 /* we must lengthen transfers to end on a 32-bit boundary */
3047 qc->pad_len = lsg->length & 3;
3048 if (qc->pad_len) {
3049 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3050 struct scatterlist *psg = &qc->pad_sgent;
3051 unsigned int offset;
3052
3053 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3054
3055 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3056
3057 /*
3058 * psg->page/offset are used to copy to-be-written
3059 * data in this function or read data in ata_sg_clean.
3060 */
3061 offset = lsg->offset + lsg->length - qc->pad_len;
3062 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3063 psg->offset = offset_in_page(offset);
3064
3065 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3066 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3067 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3068 kunmap_atomic(addr, KM_IRQ0);
3069 }
3070
3071 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3072 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3073 /* trim last sg */
3074 lsg->length -= qc->pad_len;
3075 if (lsg->length == 0)
3076 trim_sg = 1;
3077
3078 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3079 qc->n_elem - 1, lsg->length, qc->pad_len);
3080 }
3081
3082 pre_n_elem = qc->n_elem;
3083 if (trim_sg && pre_n_elem)
3084 pre_n_elem--;
3085
3086 if (!pre_n_elem) {
3087 n_elem = 0;
3088 goto skip_map;
3089 }
3090
3091 dir = qc->dma_dir;
3092 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3093 if (n_elem < 1) {
3094 /* restore last sg */
3095 lsg->length += qc->pad_len;
3096 return -1;
3097 }
3098
3099 DPRINTK("%d sg elements mapped\n", n_elem);
3100
3101 skip_map:
3102 qc->n_elem = n_elem;
3103
3104 return 0;
3105 }
3106
3107 /**
3108 * ata_poll_qc_complete - turn irq back on and finish qc
3109 * @qc: Command to complete
3111 *
3112 * LOCKING:
3113 * None. (grabs host lock)
3114 */
3115
3116 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3117 {
3118 struct ata_port *ap = qc->ap;
3119 unsigned long flags;
3120
3121 spin_lock_irqsave(&ap->host_set->lock, flags);
3122 ap->flags &= ~ATA_FLAG_NOINTR;
3123 ata_irq_on(ap);
3124 ata_qc_complete(qc);
3125 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3126 }
3127
3128 /**
3129 * ata_pio_poll - poll using PIO, depending on current state
3130 * @ap: the target ata_port
3131 *
3132 * LOCKING:
3133 * None. (executing in kernel thread context)
3134 *
3135 * RETURNS:
3136 * timeout value to use
3137 */
3138
3139 static unsigned long ata_pio_poll(struct ata_port *ap)
3140 {
3141 struct ata_queued_cmd *qc;
3142 u8 status;
3143 unsigned int poll_state = HSM_ST_UNKNOWN;
3144 unsigned int reg_state = HSM_ST_UNKNOWN;
3145
3146 qc = ata_qc_from_tag(ap, ap->active_tag);
3147 WARN_ON(qc == NULL);
3148
3149 switch (ap->hsm_task_state) {
3150 case HSM_ST:
3151 case HSM_ST_POLL:
3152 poll_state = HSM_ST_POLL;
3153 reg_state = HSM_ST;
3154 break;
3155 case HSM_ST_LAST:
3156 case HSM_ST_LAST_POLL:
3157 poll_state = HSM_ST_LAST_POLL;
3158 reg_state = HSM_ST_LAST;
3159 break;
3160 default:
3161 BUG();
3162 break;
3163 }
3164
3165 status = ata_chk_status(ap);
3166 if (status & ATA_BUSY) {
3167 if (time_after(jiffies, ap->pio_task_timeout)) {
3168 qc->err_mask |= AC_ERR_TIMEOUT;
3169 ap->hsm_task_state = HSM_ST_TMOUT;
3170 return 0;
3171 }
3172 ap->hsm_task_state = poll_state;
3173 return ATA_SHORT_PAUSE;
3174 }
3175
3176 ap->hsm_task_state = reg_state;
3177 return 0;
3178 }
3179
3180 /**
3181 * ata_pio_complete - check if drive is busy or idle
3182 * @ap: the target ata_port
3183 *
3184 * LOCKING:
3185 * None. (executing in kernel thread context)
3186 *
3187 * RETURNS:
3188 * Non-zero if qc completed, zero otherwise.
3189 */
3190
3191 static int ata_pio_complete (struct ata_port *ap)
3192 {
3193 struct ata_queued_cmd *qc;
3194 u8 drv_stat;
3195
3196 /*
3197 * This is purely heuristic. This is a fast path. Sometimes when
3198 * we enter, BSY will be cleared in a chk-status or two. If not,
3199 * the drive is probably seeking or something. Snooze for a couple
3200 * msecs, then chk-status again. If still busy, fall back to
3201 * HSM_ST_POLL state.
3202 */
3203 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3204 if (drv_stat & ATA_BUSY) {
3205 msleep(2);
3206 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3207 if (drv_stat & ATA_BUSY) {
3208 ap->hsm_task_state = HSM_ST_LAST_POLL;
3209 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3210 return 0;
3211 }
3212 }
3213
3214 qc = ata_qc_from_tag(ap, ap->active_tag);
3215 WARN_ON(qc == NULL);
3216
3217 drv_stat = ata_wait_idle(ap);
3218 if (!ata_ok(drv_stat)) {
3219 qc->err_mask |= __ac_err_mask(drv_stat);
3220 ap->hsm_task_state = HSM_ST_ERR;
3221 return 0;
3222 }
3223
3224 ap->hsm_task_state = HSM_ST_IDLE;
3225
3226 WARN_ON(qc->err_mask);
3227 ata_poll_qc_complete(qc);
3228
3229 /* another command may start at this point */
3230
3231 return 1;
3232 }
3233
3234
3235 /**
3236 * swap_buf_le16 - swap halves of 16-bit words in place
3237 * @buf: Buffer to swap
3238 * @buf_words: Number of 16-bit words in buffer.
3239 *
3240 * Swap halves of 16-bit words if needed to convert from
3241 * little-endian byte order to native cpu byte order, or
3242 * vice-versa.
3243 *
3244 * LOCKING:
3245 * Inherited from caller.
3246 */
3247 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3248 {
3249 #ifdef __BIG_ENDIAN
3250 unsigned int i;
3251
3252 for (i = 0; i < buf_words; i++)
3253 buf[i] = le16_to_cpu(buf[i]);
3254 #endif /* __BIG_ENDIAN */
3255 }
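
/*
 * Usage note (hedged): IDENTIFY DEVICE data arrives as little-endian
 * 16-bit words, so readers such as ata_dev_read_id() effectively do:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */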
3256
3257 /**
3258 * ata_mmio_data_xfer - Transfer data by MMIO
3259 * @ap: port to read/write
3260 * @buf: data buffer
3261 * @buflen: buffer length
3262 * @write_data: read/write
3263 *
3264 * Transfer data from/to the device data register by MMIO.
3265 *
3266 * LOCKING:
3267 * Inherited from caller.
3268 */
3269
3270 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3271 unsigned int buflen, int write_data)
3272 {
3273 unsigned int i;
3274 unsigned int words = buflen >> 1;
3275 u16 *buf16 = (u16 *) buf;
3276 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3277
3278 /* Transfer multiple of 2 bytes */
3279 if (write_data) {
3280 for (i = 0; i < words; i++)
3281 writew(le16_to_cpu(buf16[i]), mmio);
3282 } else {
3283 for (i = 0; i < words; i++)
3284 buf16[i] = cpu_to_le16(readw(mmio));
3285 }
3286
3287 /* Transfer trailing 1 byte, if any. */
3288 if (unlikely(buflen & 0x01)) {
3289 u16 align_buf[1] = { 0 };
3290 unsigned char *trailing_buf = buf + buflen - 1;
3291
3292 if (write_data) {
3293 memcpy(align_buf, trailing_buf, 1);
3294 writew(le16_to_cpu(align_buf[0]), mmio);
3295 } else {
3296 align_buf[0] = cpu_to_le16(readw(mmio));
3297 memcpy(trailing_buf, align_buf, 1);
3298 }
3299 }
3300 }
3301
3302 /**
3303 * ata_pio_data_xfer - Transfer data by PIO
3304 * @ap: port to read/write
3305 * @buf: data buffer
3306 * @buflen: buffer length
3307 * @write_data: read/write
3308 *
3309 * Transfer data from/to the device data register by PIO.
3310 *
3311 * LOCKING:
3312 * Inherited from caller.
3313 */
3314
3315 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3316 unsigned int buflen, int write_data)
3317 {
3318 unsigned int words = buflen >> 1;
3319
3320 /* Transfer multiple of 2 bytes */
3321 if (write_data)
3322 outsw(ap->ioaddr.data_addr, buf, words);
3323 else
3324 insw(ap->ioaddr.data_addr, buf, words);
3325
3326 /* Transfer trailing 1 byte, if any. */
3327 if (unlikely(buflen & 0x01)) {
3328 u16 align_buf[1] = { 0 };
3329 unsigned char *trailing_buf = buf + buflen - 1;
3330
3331 if (write_data) {
3332 memcpy(align_buf, trailing_buf, 1);
3333 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3334 } else {
3335 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3336 memcpy(trailing_buf, align_buf, 1);
3337 }
3338 }
3339 }
3340
3341 /**
3342 * ata_data_xfer - Transfer data from/to the data register.
3343 * @ap: port to read/write
3344 * @buf: data buffer
3345 * @buflen: buffer length
3346 * @do_write: read/write
3347 *
3348 * Transfer data from/to the device data register.
3349 *
3350 * LOCKING:
3351 * Inherited from caller.
3352 */
3353
3354 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3355 unsigned int buflen, int do_write)
3356 {
3357 /* Make the crap hardware pay the costs not the good stuff */
3358 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3359 unsigned long flags;
3360 local_irq_save(flags);
3361 if (ap->flags & ATA_FLAG_MMIO)
3362 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3363 else
3364 ata_pio_data_xfer(ap, buf, buflen, do_write);
3365 local_irq_restore(flags);
3366 } else {
3367 if (ap->flags & ATA_FLAG_MMIO)
3368 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3369 else
3370 ata_pio_data_xfer(ap, buf, buflen, do_write);
3371 }
3372 }
3373
3374 /**
3375 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3376 * @qc: Command on going
3377 *
3378 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3379 *
3380 * LOCKING:
3381 * Inherited from caller.
3382 */
3383
3384 static void ata_pio_sector(struct ata_queued_cmd *qc)
3385 {
3386 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3387 struct scatterlist *sg = qc->__sg;
3388 struct ata_port *ap = qc->ap;
3389 struct page *page;
3390 unsigned int offset;
3391 unsigned char *buf;
3392
3393 if (qc->cursect == (qc->nsect - 1))
3394 ap->hsm_task_state = HSM_ST_LAST;
3395
3396 page = sg[qc->cursg].page;
3397 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3398
3399 /* get the current page and offset */
3400 page = nth_page(page, (offset >> PAGE_SHIFT));
3401 offset %= PAGE_SIZE;
3402
3403 buf = kmap(page) + offset;
3404
3405 qc->cursect++;
3406 qc->cursg_ofs++;
3407
3408 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3409 qc->cursg++;
3410 qc->cursg_ofs = 0;
3411 }
3412
3413 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3414
3415 /* do the actual data transfer */
3416 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3417 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3418
3419 kunmap(page);
3420 }
3421
3422 /**
3423 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3424 * @qc: Command on going
3425 * @bytes: number of bytes
3426 *
3427 * Transfer data from/to the ATAPI device.
3428 *
3429 * LOCKING:
3430 * Inherited from caller.
3431 *
3432 */
3433
3434 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3435 {
3436 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3437 struct scatterlist *sg = qc->__sg;
3438 struct ata_port *ap = qc->ap;
3439 struct page *page;
3440 unsigned char *buf;
3441 unsigned int offset, count;
3442
3443 if (qc->curbytes + bytes >= qc->nbytes)
3444 ap->hsm_task_state = HSM_ST_LAST;
3445
3446 next_sg:
3447 if (unlikely(qc->cursg >= qc->n_elem)) {
3448 /*
3449 * The end of qc->sg is reached and the device expects
3450 * more data to transfer. In order not to overrun qc->sg
3451 * and to fulfill the length specified in the byte count register:
3452 * - for the read case, discard trailing data from the device
3453 * - for the write case, pad zeroed data out to the device
3454 */
3455 u16 pad_buf[1] = { 0 };
3456 unsigned int words = bytes >> 1;
3457 unsigned int i;
3458
3459 if (words) /* warning if bytes > 1 */
3460 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3461 ap->id, bytes);
3462
3463 for (i = 0; i < words; i++)
3464 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3465
3466 ap->hsm_task_state = HSM_ST_LAST;
3467 return;
3468 }
3469
3470 sg = &qc->__sg[qc->cursg];
3471
3472 page = sg->page;
3473 offset = sg->offset + qc->cursg_ofs;
3474
3475 /* get the current page and offset */
3476 page = nth_page(page, (offset >> PAGE_SHIFT));
3477 offset %= PAGE_SIZE;
3478
3479 /* don't overrun current sg */
3480 count = min(sg->length - qc->cursg_ofs, bytes);
3481
3482 /* don't cross page boundaries */
3483 count = min(count, (unsigned int)PAGE_SIZE - offset);
3484
3485 buf = kmap(page) + offset;
3486
3487 bytes -= count;
3488 qc->curbytes += count;
3489 qc->cursg_ofs += count;
3490
3491 if (qc->cursg_ofs == sg->length) {
3492 qc->cursg++;
3493 qc->cursg_ofs = 0;
3494 }
3495
3496 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3497
3498 /* do the actual data transfer */
3499 ata_data_xfer(ap, buf, count, do_write);
3500
3501 kunmap(page);
3502
3503 if (bytes)
3504 goto next_sg;
3505 }
3506
3507 /**
3508 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3509 * @qc: Command on going
3510 *
3511 * Transfer data from/to the ATAPI device.
3512 *
3513 * LOCKING:
3514 * Inherited from caller.
3515 */
3516
3517 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3518 {
3519 struct ata_port *ap = qc->ap;
3520 struct ata_device *dev = qc->dev;
3521 unsigned int ireason, bc_lo, bc_hi, bytes;
3522 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3523
3524 ap->ops->tf_read(ap, &qc->tf);
3525 ireason = qc->tf.nsect;
3526 bc_lo = qc->tf.lbam;
3527 bc_hi = qc->tf.lbah;
3528 bytes = (bc_hi << 8) | bc_lo;
3529
3530 /* shall be cleared to zero, indicating xfer of data */
3531 if (ireason & (1 << 0))
3532 goto err_out;
3533
3534 /* make sure transfer direction matches expected */
3535 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3536 if (do_write != i_write)
3537 goto err_out;
3538
3539 __atapi_pio_bytes(qc, bytes);
3540
3541 return;
3542
3543 err_out:
3544 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3545 ap->id, dev->devno);
3546 qc->err_mask |= AC_ERR_HSM;
3547 ap->hsm_task_state = HSM_ST_ERR;
3548 }
3549
3550 /**
3551 * ata_pio_block - start PIO on a block
3552 * @ap: the target ata_port
3553 *
3554 * LOCKING:
3555 * None. (executing in kernel thread context)
3556 */
3557
3558 static void ata_pio_block(struct ata_port *ap)
3559 {
3560 struct ata_queued_cmd *qc;
3561 u8 status;
3562
3563 /*
3564 * This is purely heuristic. This is a fast path.
3565 * Sometimes when we enter, BSY will be cleared in
3566 * a chk-status or two. If not, the drive is probably seeking
3567 * or something. Snooze for a couple msecs, then
3568 * chk-status again. If still busy, fall back to
3569 * HSM_ST_POLL state.
3570 */
3571 status = ata_busy_wait(ap, ATA_BUSY, 5);
3572 if (status & ATA_BUSY) {
3573 msleep(2);
3574 status = ata_busy_wait(ap, ATA_BUSY, 10);
3575 if (status & ATA_BUSY) {
3576 ap->hsm_task_state = HSM_ST_POLL;
3577 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3578 return;
3579 }
3580 }
3581
3582 qc = ata_qc_from_tag(ap, ap->active_tag);
3583 WARN_ON(qc == NULL);
3584
3585 /* check error */
3586 if (status & (ATA_ERR | ATA_DF)) {
3587 qc->err_mask |= AC_ERR_DEV;
3588 ap->hsm_task_state = HSM_ST_ERR;
3589 return;
3590 }
3591
3592 /* transfer data if any */
3593 if (is_atapi_taskfile(&qc->tf)) {
3594 /* DRQ=0 means no more data to transfer */
3595 if ((status & ATA_DRQ) == 0) {
3596 ap->hsm_task_state = HSM_ST_LAST;
3597 return;
3598 }
3599
3600 atapi_pio_bytes(qc);
3601 } else {
3602 /* handle BSY=0, DRQ=0 as error */
3603 if ((status & ATA_DRQ) == 0) {
3604 qc->err_mask |= AC_ERR_HSM;
3605 ap->hsm_task_state = HSM_ST_ERR;
3606 return;
3607 }
3608
3609 ata_pio_sector(qc);
3610 }
3611 }
3612
3613 static void ata_pio_error(struct ata_port *ap)
3614 {
3615 struct ata_queued_cmd *qc;
3616
3617 qc = ata_qc_from_tag(ap, ap->active_tag);
3618 WARN_ON(qc == NULL);
3619
3620 if (qc->tf.command != ATA_CMD_PACKET)
3621 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3622
3623 /* make sure qc->err_mask is available to
3624 * know what's wrong and recover
3625 */
3626 WARN_ON(qc->err_mask == 0);
3627
3628 ap->hsm_task_state = HSM_ST_IDLE;
3629
3630 ata_poll_qc_complete(qc);
3631 }
3632
3633 static void ata_pio_task(void *_data)
3634 {
3635 struct ata_port *ap = _data;
3636 unsigned long timeout;
3637 int qc_completed;
3638
3639 fsm_start:
3640 timeout = 0;
3641 qc_completed = 0;
3642
3643 switch (ap->hsm_task_state) {
3644 case HSM_ST_IDLE:
3645 return;
3646
3647 case HSM_ST:
3648 ata_pio_block(ap);
3649 break;
3650
3651 case HSM_ST_LAST:
3652 qc_completed = ata_pio_complete(ap);
3653 break;
3654
3655 case HSM_ST_POLL:
3656 case HSM_ST_LAST_POLL:
3657 timeout = ata_pio_poll(ap);
3658 break;
3659
3660 case HSM_ST_TMOUT:
3661 case HSM_ST_ERR:
3662 ata_pio_error(ap);
3663 return;
3664 }
3665
3666 if (timeout)
3667 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3668 else if (!qc_completed)
3669 goto fsm_start;
3670 }
3671
3672 /**
3673 * atapi_packet_task - Write CDB bytes to hardware
3674 * @_data: Port to which ATAPI device is attached.
3675 *
3676 * When the device has indicated its readiness to accept
3677 * a CDB, this function is called.  Send the CDB.
3678 * If DMA is to be performed, exit immediately.
3679 * Otherwise, we are in polling mode, so poll
3680 * status until the operation succeeds or fails.
3681 *
3682 * LOCKING:
3683 * Kernel thread context (may sleep)
3684 */
3685
3686 static void atapi_packet_task(void *_data)
3687 {
3688 struct ata_port *ap = _data;
3689 struct ata_queued_cmd *qc;
3690 u8 status;
3691
3692 qc = ata_qc_from_tag(ap, ap->active_tag);
3693 WARN_ON(qc == NULL);
3694 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3695
3696 /* sleep-wait for BSY to clear */
3697 DPRINTK("busy wait\n");
3698 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3699 qc->err_mask |= AC_ERR_TIMEOUT;
3700 goto err_out;
3701 }
3702
3703 /* make sure DRQ is set */
3704 status = ata_chk_status(ap);
3705 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3706 qc->err_mask |= AC_ERR_HSM;
3707 goto err_out;
3708 }
3709
3710 /* send SCSI cdb */
3711 DPRINTK("send cdb\n");
3712 WARN_ON(qc->dev->cdb_len < 12);
3713
3714 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3715 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3716 unsigned long flags;
3717
3718 /* Once we're done issuing command and kicking bmdma,
3719 * irq handler takes over. To not lose irq, we need
3720 * to clear NOINTR flag before sending cdb, but
3721 * interrupt handler shouldn't be invoked before we're
3722 * finished. Hence, the following locking.
3723 */
3724 spin_lock_irqsave(&ap->host_set->lock, flags);
3725 ap->flags &= ~ATA_FLAG_NOINTR;
3726 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3727 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3728 ap->ops->bmdma_start(qc); /* initiate bmdma */
3729 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3730 } else {
3731 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3732
3733 /* PIO commands are handled by polling */
3734 ap->hsm_task_state = HSM_ST;
3735 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3736 }
3737
3738 return;
3739
3740 err_out:
3741 ata_poll_qc_complete(qc);
3742 }
3743
3744 /**
3745 * ata_qc_timeout - Handle timeout of queued command
3746 * @qc: Command that timed out
3747 *
3748 * Some part of the kernel (currently, only the SCSI layer)
3749 * has noticed that the active command on port @ap has not
3750 * completed after a specified length of time. Handle this
3751 * condition by disabling DMA (if necessary) and completing
3752 * transactions, with error if necessary.
3753 *
3754 * This also handles the case of the "lost interrupt", where
3755 * for some reason (possibly hardware bug, possibly driver bug)
3756 * an interrupt was not delivered to the driver, even though the
3757 * transaction completed successfully.
3758 *
3759 * LOCKING:
3760 * Inherited from SCSI layer (none, can sleep)
3761 */
3762
3763 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3764 {
3765 struct ata_port *ap = qc->ap;
3766 struct ata_host_set *host_set = ap->host_set;
3767 u8 host_stat = 0, drv_stat;
3768 unsigned long flags;
3769
3770 DPRINTK("ENTER\n");
3771
3772 ap->hsm_task_state = HSM_ST_IDLE;
3773
3774 spin_lock_irqsave(&host_set->lock, flags);
3775
3776 switch (qc->tf.protocol) {
3777
3778 case ATA_PROT_DMA:
3779 case ATA_PROT_ATAPI_DMA:
3780 host_stat = ap->ops->bmdma_status(ap);
3781
3782 /* before we do anything else, clear DMA-Start bit */
3783 ap->ops->bmdma_stop(qc);
3784
3785 /* fall through */
3786
3787 default:
3788 ata_altstatus(ap);
3789 drv_stat = ata_chk_status(ap);
3790
3791 /* ack bmdma irq events */
3792 ap->ops->irq_clear(ap);
3793
3794 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3795 ap->id, qc->tf.command, drv_stat, host_stat);
3796
3797 /* complete taskfile transaction */
3798 qc->err_mask |= ac_err_mask(drv_stat);
3799 break;
3800 }
3801
3802 spin_unlock_irqrestore(&host_set->lock, flags);
3803
3804 ata_eh_qc_complete(qc);
3805
3806 DPRINTK("EXIT\n");
3807 }
3808
3809 /**
3810 * ata_eng_timeout - Handle timeout of queued command
3811 * @ap: Port on which timed-out command is active
3812 *
3813 * Some part of the kernel (currently, only the SCSI layer)
3814 * has noticed that the active command on port @ap has not
3815 * completed after a specified length of time. Handle this
3816 * condition by disabling DMA (if necessary) and completing
3817 * transactions, with error if necessary.
3818 *
3819 * This also handles the case of the "lost interrupt", where
3820 * for some reason (possibly hardware bug, possibly driver bug)
3821 * an interrupt was not delivered to the driver, even though the
3822 * transaction completed successfully.
3823 *
3824 * LOCKING:
3825 * Inherited from SCSI layer (none, can sleep)
3826 */
3827
3828 void ata_eng_timeout(struct ata_port *ap)
3829 {
3830 DPRINTK("ENTER\n");
3831
3832 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3833
3834 DPRINTK("EXIT\n");
3835 }
3836
3837 /**
3838 * ata_qc_new - Request an available ATA command, for queueing
3839 * @ap: Port from which a free command slot is requested
3841 *
3842 * LOCKING:
3843 * None.
3844 */
3845
3846 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3847 {
3848 struct ata_queued_cmd *qc = NULL;
3849 unsigned int i;
3850
3851 for (i = 0; i < ATA_MAX_QUEUE; i++)
3852 if (!test_and_set_bit(i, &ap->qactive)) {
3853 qc = ata_qc_from_tag(ap, i);
3854 break;
3855 }
3856
3857 if (qc)
3858 qc->tag = i;
3859
3860 return qc;
3861 }
3862
3863 /**
3864 * ata_qc_new_init - Request an available ATA command, and initialize it
3865 * @ap: Port associated with device @dev
3866 * @dev: Device from which we request an available command structure
3867 *
3868 * LOCKING:
3869 * None.
3870 */
3871
3872 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3873 struct ata_device *dev)
3874 {
3875 struct ata_queued_cmd *qc;
3876
3877 qc = ata_qc_new(ap);
3878 if (qc) {
3879 qc->scsicmd = NULL;
3880 qc->ap = ap;
3881 qc->dev = dev;
3882
3883 ata_qc_reinit(qc);
3884 }
3885
3886 return qc;
3887 }
3888
3889 /**
3890 * ata_qc_free - free unused ata_queued_cmd
3891 * @qc: Command to complete
3892 *
3893 * Designed to free unused ata_queued_cmd object
3894 * in case something prevents using it.
3895 *
3896 * LOCKING:
3897 * spin_lock_irqsave(host_set lock)
3898 */
3899 void ata_qc_free(struct ata_queued_cmd *qc)
3900 {
3901 struct ata_port *ap = qc->ap;
3902 unsigned int tag;
3903
3904 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3905
3906 qc->flags = 0;
3907 tag = qc->tag;
3908 if (likely(ata_tag_valid(tag))) {
3909 if (tag == ap->active_tag)
3910 ap->active_tag = ATA_TAG_POISON;
3911 qc->tag = ATA_TAG_POISON;
3912 clear_bit(tag, &ap->qactive);
3913 }
3914 }
3915
3916 void __ata_qc_complete(struct ata_queued_cmd *qc)
3917 {
3918 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3919 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3920
3921 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3922 ata_sg_clean(qc);
3923
3924 /* atapi: mark qc as inactive to prevent the interrupt handler
3925 * from completing the command twice later, before the error handler
3926 * is called. (when rc != 0 and atapi request sense is needed)
3927 */
3928 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3929
3930 /* call completion callback */
3931 qc->complete_fn(qc);
3932 }
3933
3934 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3935 {
3936 struct ata_port *ap = qc->ap;
3937
3938 switch (qc->tf.protocol) {
3939 case ATA_PROT_DMA:
3940 case ATA_PROT_ATAPI_DMA:
3941 return 1;
3942
3943 case ATA_PROT_ATAPI:
3944 case ATA_PROT_PIO:
3945 if (ap->flags & ATA_FLAG_PIO_DMA)
3946 return 1;
3947
3948 /* fall through */
3949
3950 default:
3951 return 0;
3952 }
3953
3954 /* never reached */
3955 }
3956
3957 /**
3958 * ata_qc_issue - issue taskfile to device
3959 * @qc: command to issue to device
3960 *
3961 * Prepare an ATA command for submission to the device.
3962 * This includes mapping the data into a DMA-able
3963 * area, filling in the S/G table, and finally
3964 * writing the taskfile to hardware, starting the command.
3965 *
3966 * LOCKING:
3967 * spin_lock_irqsave(host_set lock)
3968 *
3969 * RETURNS:
3970 * Zero on success, AC_ERR_* mask on failure
3971 */
3972
3973 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3974 {
3975 struct ata_port *ap = qc->ap;
3976
3977 if (ata_should_dma_map(qc)) {
3978 if (qc->flags & ATA_QCFLAG_SG) {
3979 if (ata_sg_setup(qc))
3980 goto sg_err;
3981 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3982 if (ata_sg_setup_one(qc))
3983 goto sg_err;
3984 }
3985 } else {
3986 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3987 }
3988
3989 ap->ops->qc_prep(qc);
3990
3991 qc->ap->active_tag = qc->tag;
3992 qc->flags |= ATA_QCFLAG_ACTIVE;
3993
3994 return ap->ops->qc_issue(qc);
3995
3996 sg_err:
3997 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3998 return AC_ERR_SYSTEM;
3999 }
4000
4001
4002 /**
4003 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4004 * @qc: command to issue to device
4005 *
4006 * Using various libata functions and hooks, this function
4007 * starts an ATA command. ATA commands are grouped into
4008 * classes called "protocols", and issuing each type of protocol
4009 * is slightly different.
4010 *
4011 * May be used as the qc_issue() entry in ata_port_operations.
4012 *
4013 * LOCKING:
4014 * spin_lock_irqsave(host_set lock)
4015 *
4016 * RETURNS:
4017 * Zero on success, AC_ERR_* mask on failure
4018 */
4019
4020 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4021 {
4022 struct ata_port *ap = qc->ap;
4023
4024 ata_dev_select(ap, qc->dev->devno, 1, 0);
4025
4026 switch (qc->tf.protocol) {
4027 case ATA_PROT_NODATA:
4028 ata_tf_to_host(ap, &qc->tf);
4029 break;
4030
4031 case ATA_PROT_DMA:
4032 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4033 ap->ops->bmdma_setup(qc); /* set up bmdma */
4034 ap->ops->bmdma_start(qc); /* initiate bmdma */
4035 break;
4036
4037 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4038 ata_qc_set_polling(qc);
4039 ata_tf_to_host(ap, &qc->tf);
4040 ap->hsm_task_state = HSM_ST;
4041 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4042 break;
4043
4044 case ATA_PROT_ATAPI:
4045 ata_qc_set_polling(qc);
4046 ata_tf_to_host(ap, &qc->tf);
4047 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4048 break;
4049
4050 case ATA_PROT_ATAPI_NODATA:
4051 ap->flags |= ATA_FLAG_NOINTR;
4052 ata_tf_to_host(ap, &qc->tf);
4053 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4054 break;
4055
4056 case ATA_PROT_ATAPI_DMA:
4057 ap->flags |= ATA_FLAG_NOINTR;
4058 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4059 ap->ops->bmdma_setup(qc); /* set up bmdma */
4060 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4061 break;
4062
4063 default:
4064 WARN_ON(1);
4065 return AC_ERR_SYSTEM;
4066 }
4067
4068 return 0;
4069 }
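
/*
 * Hedged sketch: a minimal BMDMA-style driver typically plugs the
 * stock helpers straight into its ata_port_operations.  The table
 * below is illustrative and deliberately incomplete.
 */
static const struct ata_port_operations foo_ops = {
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	/* ... plus tf_load/tf_read, dev_select, bmdma hooks, etc. */
};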
4070
4071 /**
4072 * ata_host_intr - Handle host interrupt for given (port, task)
4073 * @ap: Port on which interrupt arrived (possibly...)
4074 * @qc: Taskfile currently active in engine
4075 *
4076 * Handle host interrupt for given queued command. Currently,
4077 * only DMA interrupts are handled. All other commands are
4078 * handled via polling with interrupts disabled (nIEN bit).
4079 *
4080 * LOCKING:
4081 * spin_lock_irqsave(host_set lock)
4082 *
4083 * RETURNS:
4084 * One if interrupt was handled, zero if not (shared irq).
4085 */
4086
4087 inline unsigned int ata_host_intr (struct ata_port *ap,
4088 struct ata_queued_cmd *qc)
4089 {
4090 u8 status, host_stat;
4091
4092 switch (qc->tf.protocol) {
4093
4094 case ATA_PROT_DMA:
4095 case ATA_PROT_ATAPI_DMA:
4096 case ATA_PROT_ATAPI:
4097 /* check status of DMA engine */
4098 host_stat = ap->ops->bmdma_status(ap);
4099 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4100
4101 /* if it's not our irq... */
4102 if (!(host_stat & ATA_DMA_INTR))
4103 goto idle_irq;
4104
4105 /* before we do anything else, clear DMA-Start bit */
4106 ap->ops->bmdma_stop(qc);
4107
4108 /* fall through */
4109
4110 case ATA_PROT_ATAPI_NODATA:
4111 case ATA_PROT_NODATA:
4112 /* check altstatus */
4113 status = ata_altstatus(ap);
4114 if (status & ATA_BUSY)
4115 goto idle_irq;
4116
4117 /* check main status, clearing INTRQ */
4118 status = ata_chk_status(ap);
4119 if (unlikely(status & ATA_BUSY))
4120 goto idle_irq;
4121 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4122 ap->id, qc->tf.protocol, status);
4123
4124 /* ack bmdma irq events */
4125 ap->ops->irq_clear(ap);
4126
4127 /* complete taskfile transaction */
4128 qc->err_mask |= ac_err_mask(status);
4129 ata_qc_complete(qc);
4130 break;
4131
4132 default:
4133 goto idle_irq;
4134 }
4135
4136 return 1; /* irq handled */
4137
4138 idle_irq:
4139 ap->stats.idle_irq++;
4140
4141 #ifdef ATA_IRQ_TRAP
4142 if ((ap->stats.idle_irq % 1000) == 0) {
4143 ata_irq_ack(ap, 0); /* debug trap */
4144 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4145 return 1;
4146 }
4147 #endif
4148 return 0; /* irq not handled */
4149 }
4150
4151 /**
4152 * ata_interrupt - Default ATA host interrupt handler
4153 * @irq: irq line (unused)
4154 * @dev_instance: pointer to our ata_host_set information structure
4155 * @regs: unused
4156 *
4157 * Default interrupt handler for PCI IDE devices. Calls
4158 * ata_host_intr() for each port that is not disabled.
4159 *
4160 * LOCKING:
4161 * Obtains host_set lock during operation.
4162 *
4163 * RETURNS:
4164 * IRQ_NONE or IRQ_HANDLED.
4165 */
4166
4167 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4168 {
4169 struct ata_host_set *host_set = dev_instance;
4170 unsigned int i;
4171 unsigned int handled = 0;
4172 unsigned long flags;
4173
4174 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4175 spin_lock_irqsave(&host_set->lock, flags);
4176
4177 for (i = 0; i < host_set->n_ports; i++) {
4178 struct ata_port *ap;
4179
4180 ap = host_set->ports[i];
4181 if (ap &&
4182 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4183 struct ata_queued_cmd *qc;
4184
4185 qc = ata_qc_from_tag(ap, ap->active_tag);
4186 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4187 (qc->flags & ATA_QCFLAG_ACTIVE))
4188 handled |= ata_host_intr(ap, qc);
4189 }
4190 }
4191
4192 spin_unlock_irqrestore(&host_set->lock, flags);
4193
4194 return IRQ_RETVAL(handled);
4195 }
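
/*
 * Example (editor's sketch): drivers normally publish this handler via
 * their port operations, and ata_device_add() below requests the
 * (possibly shared) irq with it; names are hypothetical:
 *
 *	static const struct ata_port_operations example_port_ops = {
 *		...
 *		.irq_handler	= ata_interrupt,
 *	};
 *
 *	probe_ent->irq_flags = SA_SHIRQ;
 */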
4196
4197
4198 /*
4199 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4200 * without filling any other registers.
4201 */
4202 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4203 u8 cmd)
4204 {
4205 struct ata_taskfile tf;
4206 int err;
4207
4208 ata_tf_init(ap, &tf, dev->devno);
4209
4210 tf.command = cmd;
4211 tf.flags |= ATA_TFLAG_DEVICE;
4212 tf.protocol = ATA_PROT_NODATA;
4213
4214 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4215 if (err)
4216 printk(KERN_ERR "%s: ata command failed: %d\n",
4217 __FUNCTION__, err);
4218
4219 return err;
4220 }
4221
4222 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4223 {
4224 u8 cmd;
4225
4226 if (!ata_try_flush_cache(dev))
4227 return 0;
4228
4229 if (ata_id_has_flush_ext(dev->id))
4230 cmd = ATA_CMD_FLUSH_EXT;
4231 else
4232 cmd = ATA_CMD_FLUSH;
4233
4234 return ata_do_simple_cmd(ap, dev, cmd);
4235 }
4236
4237 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4238 {
4239 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4240 }
4241
4242 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4243 {
4244 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4245 }
4246
4247 /**
4248 * ata_device_resume - wake up a previously suspended device
4249 * @ap: port the device is connected to
4250 * @dev: the device to resume
4251 *
4252 * Kick the drive back into action by sending it an idle-immediate
4253 * command and making sure the transfer mode matches between drive
4254 * and host.
4255 *
4256 */
4257 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4258 {
4259 if (ap->flags & ATA_FLAG_SUSPENDED) {
4260 ap->flags &= ~ATA_FLAG_SUSPENDED;
4261 ata_set_mode(ap);
4262 }
4263 if (!ata_dev_present(dev))
4264 return 0;
4265 if (dev->class == ATA_DEV_ATA)
4266 ata_start_drive(ap, dev);
4267
4268 return 0;
4269 }
4270
4271 /**
4272 * ata_device_suspend - prepare a device for suspend
4273 * @ap: port the device is connected to
4274 * @dev: the device to suspend
 * @state: target power management state
4275 *
4276 * Flush the cache on the drive, if appropriate, then issue a
4277 * standbynow command.
4278 */
4279 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4280 {
4281 if (!ata_dev_present(dev))
4282 return 0;
4283 if (dev->class == ATA_DEV_ATA)
4284 ata_flush_cache(ap, dev);
4285
4286 if (state.event != PM_EVENT_FREEZE)
4287 ata_standby_drive(ap, dev);
4288 ap->flags |= ATA_FLAG_SUSPENDED;
4289 return 0;
4290 }
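
/*
 * Example (editor's sketch): the SCSI-layer suspend hook resolves the
 * ata_port and ata_device behind a scsi_device and delegates here,
 * roughly:
 *
 *	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
 *	struct ata_device *dev = &ap->device[sdev->id];
 *
 *	return ata_device_suspend(ap, dev, state);
 */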
4291
4292 /**
4293 * ata_port_start - Set port up for DMA.
4294 * @ap: Port to initialize
4295 *
4296 * Called just after data structures for each port are
4297 * initialized. Allocates space for PRD table.
4298 *
4299 * May be used as the port_start() entry in ata_port_operations.
4300 *
4301 * LOCKING:
4302 * Inherited from caller.
4303 */
4304
4305 int ata_port_start (struct ata_port *ap)
4306 {
4307 struct device *dev = ap->dev;
4308 int rc;
4309
4310 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4311 if (!ap->prd)
4312 return -ENOMEM;
4313
4314 rc = ata_pad_alloc(ap, dev);
4315 if (rc) {
4316 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4317 return rc;
4318 }
4319
4320 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4321
4322 return 0;
4323 }
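
/*
 * Example (editor's sketch): a driver that needs extra per-port
 * resources can wrap this helper in its own port_start();
 * "example_port_start" is a hypothetical name:
 *
 *	static int example_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);
 *		if (rc)
 *			return rc;
 *		[allocate controller-private state here]
 *		return 0;
 *	}
 */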
4324
4325
4326 /**
4327 * ata_port_stop - Undo ata_port_start()
4328 * @ap: Port to shut down
4329 *
4330 * Frees the PRD table.
4331 *
4332 * May be used as the port_stop() entry in ata_port_operations.
4333 *
4334 * LOCKING:
4335 * Inherited from caller.
4336 */
4337
4338 void ata_port_stop (struct ata_port *ap)
4339 {
4340 struct device *dev = ap->dev;
4341
4342 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4343 ata_pad_free(ap, dev);
4344 }
4345
4346 void ata_host_stop (struct ata_host_set *host_set)
4347 {
4348 if (host_set->mmio_base)
4349 iounmap(host_set->mmio_base);
4350 }
4351
4352
4353 /**
4354 * ata_host_remove - Unregister SCSI host structure with upper layers
4355 * @ap: Port to unregister
4356 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4357 *
4358 * LOCKING:
4359 * Inherited from caller.
4360 */
4361
4362 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4363 {
4364 struct Scsi_Host *sh = ap->host;
4365
4366 DPRINTK("ENTER\n");
4367
4368 if (do_unregister)
4369 scsi_remove_host(sh);
4370
4371 ap->ops->port_stop(ap);
4372 }
4373
4374 /**
4375 * ata_host_init - Initialize an ata_port structure
4376 * @ap: Structure to initialize
4377 * @host: associated SCSI mid-layer structure
4378 * @host_set: Collection of hosts to which @ap belongs
4379 * @ent: Probe information provided by low-level driver
4380 * @port_no: Port number associated with this ata_port
4381 *
4382 * Initialize a new ata_port structure, and its associated
4383 * scsi_host.
4384 *
4385 * LOCKING:
4386 * Inherited from caller.
4387 */
4388
4389 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4390 struct ata_host_set *host_set,
4391 const struct ata_probe_ent *ent, unsigned int port_no)
4392 {
4393 unsigned int i;
4394
4395 host->max_id = 16;
4396 host->max_lun = 1;
4397 host->max_channel = 1;
4398 host->unique_id = ata_unique_id++;
4399 host->max_cmd_len = 12;
4400
4401 ap->flags = ATA_FLAG_PORT_DISABLED;
4402 ap->id = host->unique_id;
4403 ap->host = host;
4404 ap->ctl = ATA_DEVCTL_OBS;
4405 ap->host_set = host_set;
4406 ap->dev = ent->dev;
4407 ap->port_no = port_no;
4408 ap->hard_port_no =
4409 ent->legacy_mode ? ent->hard_port_no : port_no;
4410 ap->pio_mask = ent->pio_mask;
4411 ap->mwdma_mask = ent->mwdma_mask;
4412 ap->udma_mask = ent->udma_mask;
4413 ap->flags |= ent->host_flags;
4414 ap->ops = ent->port_ops;
4415 ap->cbl = ATA_CBL_NONE;
4416 ap->active_tag = ATA_TAG_POISON;
4417 ap->last_ctl = 0xFF;
4418
4419 INIT_WORK(&ap->port_task, NULL, NULL);
4420 INIT_LIST_HEAD(&ap->eh_done_q);
4421
4422 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4423 struct ata_device *dev = &ap->device[i];
4424 dev->devno = i;
4425 dev->pio_mask = UINT_MAX;
4426 dev->mwdma_mask = UINT_MAX;
4427 dev->udma_mask = UINT_MAX;
4428 }
4429
4430 #ifdef ATA_IRQ_TRAP
4431 ap->stats.unhandled_irq = 1;
4432 ap->stats.idle_irq = 1;
4433 #endif
4434
4435 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4436 }
4437
4438 /**
4439 * ata_host_add - Attach low-level ATA driver to system
4440 * @ent: Information provided by low-level driver
4441 * @host_set: Collection of ports to which we add
4442 * @port_no: Port number associated with this host
4443 *
4444 * Attach low-level ATA driver to system.
4445 *
4446 * LOCKING:
4447 * PCI/etc. bus probe sem.
4448 *
4449 * RETURNS:
4450 * New ata_port on success, NULL on error.
4451 */
4452
4453 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4454 struct ata_host_set *host_set,
4455 unsigned int port_no)
4456 {
4457 struct Scsi_Host *host;
4458 struct ata_port *ap;
4459 int rc;
4460
4461 DPRINTK("ENTER\n");
4462
4463 if (!ent->port_ops->probe_reset &&
4464 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4465 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4466 port_no);
4467 return NULL;
4468 }
4469
4470 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4471 if (!host)
4472 return NULL;
4473
4474 host->transportt = &ata_scsi_transport_template;
4475
4476 ap = (struct ata_port *) &host->hostdata[0];
4477
4478 ata_host_init(ap, host, host_set, ent, port_no);
4479
4480 rc = ap->ops->port_start(ap);
4481 if (rc)
4482 goto err_out;
4483
4484 return ap;
4485
4486 err_out:
4487 scsi_host_put(host);
4488 return NULL;
4489 }
4490
4491 /**
4492 * ata_device_add - Register hardware device with ATA and SCSI layers
4493 * @ent: Probe information describing hardware device to be registered
4494 *
4495 * This function processes the information provided in the probe
4496 * information struct @ent, allocates the necessary ATA and SCSI
4497 * host information structures, initializes them, and registers
4498 * everything with requisite kernel subsystems.
4499 *
4500 * This function requests irqs, probes the ATA bus, and probes
4501 * the SCSI bus.
4502 *
4503 * LOCKING:
4504 * PCI/etc. bus probe sem.
4505 *
4506 * RETURNS:
4507 * Number of ports registered. Zero on error (no ports registered).
4508 */
4509
4510 int ata_device_add(const struct ata_probe_ent *ent)
4511 {
4512 unsigned int count = 0, i;
4513 struct device *dev = ent->dev;
4514 struct ata_host_set *host_set;
4515
4516 DPRINTK("ENTER\n");
4517 /* alloc a container for our list of ATA ports (buses) */
4518 host_set = kzalloc(sizeof(struct ata_host_set) +
4519 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4520 if (!host_set)
4521 return 0;
4522 spin_lock_init(&host_set->lock);
4523
4524 host_set->dev = dev;
4525 host_set->n_ports = ent->n_ports;
4526 host_set->irq = ent->irq;
4527 host_set->mmio_base = ent->mmio_base;
4528 host_set->private_data = ent->private_data;
4529 host_set->ops = ent->port_ops;
4530
4531 /* register each port bound to this device */
4532 for (i = 0; i < ent->n_ports; i++) {
4533 struct ata_port *ap;
4534 unsigned long xfer_mode_mask;
4535
4536 ap = ata_host_add(ent, host_set, i);
4537 if (!ap)
4538 goto err_out;
4539
4540 host_set->ports[i] = ap;
4541 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4542 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4543 (ap->pio_mask << ATA_SHIFT_PIO);
4544
4545 /* print per-port info to dmesg */
4546 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4547 "bmdma 0x%lX irq %lu\n",
4548 ap->id,
4549 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4550 ata_mode_string(xfer_mode_mask),
4551 ap->ioaddr.cmd_addr,
4552 ap->ioaddr.ctl_addr,
4553 ap->ioaddr.bmdma_addr,
4554 ent->irq);
4555
4556 ata_chk_status(ap);
4557 host_set->ops->irq_clear(ap);
4558 count++;
4559 }
4560
4561 if (!count)
4562 goto err_free_ret;
4563
4564 /* obtain irq, which is shared between channels */
4565 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4566 DRV_NAME, host_set))
4567 goto err_out;
4568
4569 /* perform each probe synchronously */
4570 DPRINTK("probe begin\n");
4571 for (i = 0; i < count; i++) {
4572 struct ata_port *ap;
4573 int rc;
4574
4575 ap = host_set->ports[i];
4576
4577 DPRINTK("ata%u: bus probe begin\n", ap->id);
4578 rc = ata_bus_probe(ap);
4579 DPRINTK("ata%u: bus probe end\n", ap->id);
4580
4581 if (rc) {
4582 /* FIXME: do something useful here?
4583 * Current libata behavior will
4584 * tear down everything when
4585 * the module is removed
4586 * or the h/w is unplugged.
4587 */
4588 }
4589
4590 rc = scsi_add_host(ap->host, dev);
4591 if (rc) {
4592 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4593 ap->id);
4594 /* FIXME: do something useful here */
4595 /* FIXME: handle unconditional calls to
4596 * scsi_scan_host and ata_host_remove, below,
4597 * at the very least
4598 */
4599 }
4600 }
4601
4602 /* probes are done, now scan each port's disk(s) */
4603 DPRINTK("host probe begin\n");
4604 for (i = 0; i < count; i++) {
4605 struct ata_port *ap = host_set->ports[i];
4606
4607 ata_scsi_scan_host(ap);
4608 }
4609
4610 dev_set_drvdata(dev, host_set);
4611
4612 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4613 return ent->n_ports; /* success */
4614
4615 err_out:
4616 for (i = 0; i < count; i++) {
4617 ata_host_remove(host_set->ports[i], 1);
4618 scsi_host_put(host_set->ports[i]->host);
4619 }
4620 err_free_ret:
4621 kfree(host_set);
4622 VPRINTK("EXIT, returning 0\n");
4623 return 0;
4624 }
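
/*
 * Example (editor's sketch): a driver's probe routine fills an
 * ata_probe_ent and hands it here, failing the probe when no ports
 * were registered; the field values are hypothetical:
 *
 *	probe_ent->n_ports = 2;
 *	probe_ent->irq = pdev->irq;
 *	probe_ent->irq_flags = SA_SHIRQ;
 *	probe_ent->port_ops = &example_port_ops;
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 */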
4625
4626 /**
4627 * ata_host_set_remove - unregister and free an ATA host set
4628 * @host_set: ATA host set to be removed
4629 *
4630 * Unregister all objects associated with this host set. Free those
4631 * objects.
4632 *
4633 * LOCKING:
4634 * Inherited from calling layer (may sleep).
4635 */
4636
4637 void ata_host_set_remove(struct ata_host_set *host_set)
4638 {
4639 struct ata_port *ap;
4640 unsigned int i;
4641
4642 for (i = 0; i < host_set->n_ports; i++) {
4643 ap = host_set->ports[i];
4644 scsi_remove_host(ap->host);
4645 }
4646
4647 free_irq(host_set->irq, host_set);
4648
4649 for (i = 0; i < host_set->n_ports; i++) {
4650 ap = host_set->ports[i];
4651
4652 ata_scsi_release(ap->host);
4653
4654 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4655 struct ata_ioports *ioaddr = &ap->ioaddr;
4656
4657 if (ioaddr->cmd_addr == 0x1f0)
4658 release_region(0x1f0, 8);
4659 else if (ioaddr->cmd_addr == 0x170)
4660 release_region(0x170, 8);
4661 }
4662
4663 scsi_host_put(ap->host);
4664 }
4665
4666 if (host_set->ops->host_stop)
4667 host_set->ops->host_stop(host_set);
4668
4669 kfree(host_set);
4670 }
4671
4672 /**
4673 * ata_scsi_release - SCSI layer callback hook for host unload
4674 * @host: libata host to be unloaded
4675 *
4676 * Performs all duties necessary to shut down a libata port...
4677 * Kill port kthread, disable port, and release resources.
4678 *
4679 * LOCKING:
4680 * Inherited from SCSI layer.
4681 *
4682 * RETURNS:
4683 * One.
4684 */
4685
4686 int ata_scsi_release(struct Scsi_Host *host)
4687 {
4688 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4689 int i;
4690
4691 DPRINTK("ENTER\n");
4692
4693 ap->ops->port_disable(ap);
4694 ata_host_remove(ap, 0);
4695 for (i = 0; i < ATA_MAX_DEVICES; i++)
4696 kfree(ap->device[i].id);
4697
4698 DPRINTK("EXIT\n");
4699 return 1;
4700 }
4701
4702 /**
4703 * ata_std_ports - initialize ioaddr with standard port offsets.
4704 * @ioaddr: IO address structure to be initialized
4705 *
4706 * Utility function which initializes data_addr, error_addr,
4707 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4708 * device_addr, status_addr, and command_addr to standard offsets
4709 * relative to cmd_addr.
4710 *
4711 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4712 */
4713
4714 void ata_std_ports(struct ata_ioports *ioaddr)
4715 {
4716 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4717 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4718 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4719 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4720 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4721 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4722 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4723 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4724 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4725 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4726 }
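
/*
 * Example (editor's sketch): filling in a legacy primary channel before
 * probing, using the well-known primary port addresses:
 *
 *	ioaddr->cmd_addr = 0x1f0;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
 *	ata_std_ports(ioaddr);
 */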
4727
4728
4729 #ifdef CONFIG_PCI
4730
4731 void ata_pci_host_stop (struct ata_host_set *host_set)
4732 {
4733 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4734
4735 pci_iounmap(pdev, host_set->mmio_base);
4736 }
4737
4738 /**
4739 * ata_pci_remove_one - PCI layer callback for device removal
4740 * @pdev: PCI device that was removed
4741 *
4742 * PCI layer indicates to libata via this hook that
4743 * hot-unplug or module unload event has occurred.
4744 * Handle this by unregistering all objects associated
4745 * with this PCI device. Free those objects. Then finally
4746 * release PCI resources and disable device.
4747 *
4748 * LOCKING:
4749 * Inherited from PCI layer (may sleep).
4750 */
4751
4752 void ata_pci_remove_one (struct pci_dev *pdev)
4753 {
4754 struct device *dev = pci_dev_to_dev(pdev);
4755 struct ata_host_set *host_set = dev_get_drvdata(dev);
4756
4757 ata_host_set_remove(host_set);
4758 pci_release_regions(pdev);
4759 pci_disable_device(pdev);
4760 dev_set_drvdata(dev, NULL);
4761 }
4762
4763 /* move to PCI subsystem */
4764 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4765 {
4766 unsigned long tmp = 0;
4767
4768 switch (bits->width) {
4769 case 1: {
4770 u8 tmp8 = 0;
4771 pci_read_config_byte(pdev, bits->reg, &tmp8);
4772 tmp = tmp8;
4773 break;
4774 }
4775 case 2: {
4776 u16 tmp16 = 0;
4777 pci_read_config_word(pdev, bits->reg, &tmp16);
4778 tmp = tmp16;
4779 break;
4780 }
4781 case 4: {
4782 u32 tmp32 = 0;
4783 pci_read_config_dword(pdev, bits->reg, &tmp32);
4784 tmp = tmp32;
4785 break;
4786 }
4787
4788 default:
4789 return -EINVAL;
4790 }
4791
4792 tmp &= bits->mask;
4793
4794 return (tmp == bits->val) ? 1 : 0;
4795 }
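
/*
 * Example (editor's sketch): testing whether a channel is enabled in
 * PCI config space, in the style of PIIX-type drivers; the register,
 * mask and value below are hypothetical:
 *
 *	static const struct pci_bits example_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };	[reg, width (bytes), mask, val]
 *
 *	if (!pci_test_config_bits(pdev, &example_enable_bits))
 *		return -ENODEV;			[channel disabled]
 */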
4796
4797 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4798 {
4799 pci_save_state(pdev);
4800 pci_disable_device(pdev);
4801 pci_set_power_state(pdev, PCI_D3hot);
4802 return 0;
4803 }
4804
4805 int ata_pci_device_resume(struct pci_dev *pdev)
4806 {
4807 pci_set_power_state(pdev, PCI_D0);
4808 pci_restore_state(pdev);
4809 pci_enable_device(pdev);
4810 pci_set_master(pdev);
4811 return 0;
4812 }
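
/*
 * Example (editor's sketch): these helpers slot directly into a
 * pci_driver; "example_pci_driver" is a hypothetical name:
 *
 *	static struct pci_driver example_pci_driver = {
 *		...
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */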
4813 #endif /* CONFIG_PCI */
4814
4815
4816 static int __init ata_init(void)
4817 {
4818 ata_wq = create_workqueue("ata");
4819 if (!ata_wq)
4820 return -ENOMEM;
4821
4822 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4823 return 0;
4824 }
4825
4826 static void __exit ata_exit(void)
4827 {
4828 destroy_workqueue(ata_wq);
4829 }
4830
4831 module_init(ata_init);
4832 module_exit(ata_exit);
4833
4834 static unsigned long ratelimit_time;
4835 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4836
4837 int ata_ratelimit(void)
4838 {
4839 int rc;
4840 unsigned long flags;
4841
4842 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4843
4844 if (time_after(jiffies, ratelimit_time)) {
4845 rc = 1;
4846 ratelimit_time = jiffies + (HZ/5);
4847 } else
4848 rc = 0;
4849
4850 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4851
4852 return rc;
4853 }
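
/*
 * Example (editor's sketch): ata_ratelimit() returns nonzero at most
 * about five times per second (HZ/5 spacing), so it can gate noisy
 * messages:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */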
4854
4855 /*
4856 * libata is essentially a library of internal helper functions for
4857 * low-level ATA host controller drivers. As such, the API/ABI is
4858 * likely to change as new drivers are added and updated.
4859 * Do not depend on ABI/API stability.
4860 */
4861
4862 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4863 EXPORT_SYMBOL_GPL(ata_std_ports);
4864 EXPORT_SYMBOL_GPL(ata_device_add);
4865 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4866 EXPORT_SYMBOL_GPL(ata_sg_init);
4867 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4868 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4869 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4870 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4871 EXPORT_SYMBOL_GPL(ata_tf_load);
4872 EXPORT_SYMBOL_GPL(ata_tf_read);
4873 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4874 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4875 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4876 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4877 EXPORT_SYMBOL_GPL(ata_check_status);
4878 EXPORT_SYMBOL_GPL(ata_altstatus);
4879 EXPORT_SYMBOL_GPL(ata_exec_command);
4880 EXPORT_SYMBOL_GPL(ata_port_start);
4881 EXPORT_SYMBOL_GPL(ata_port_stop);
4882 EXPORT_SYMBOL_GPL(ata_host_stop);
4883 EXPORT_SYMBOL_GPL(ata_interrupt);
4884 EXPORT_SYMBOL_GPL(ata_qc_prep);
4885 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4886 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4887 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4888 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4889 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4890 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4891 EXPORT_SYMBOL_GPL(ata_port_probe);
4892 EXPORT_SYMBOL_GPL(sata_phy_reset);
4893 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4894 EXPORT_SYMBOL_GPL(ata_bus_reset);
4895 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4896 EXPORT_SYMBOL_GPL(ata_std_softreset);
4897 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4898 EXPORT_SYMBOL_GPL(ata_std_postreset);
4899 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4900 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4901 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4902 EXPORT_SYMBOL_GPL(ata_dev_classify);
4903 EXPORT_SYMBOL_GPL(ata_dev_pair);
4904 EXPORT_SYMBOL_GPL(ata_port_disable);
4905 EXPORT_SYMBOL_GPL(ata_ratelimit);
4906 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4907 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4908 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4909 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4910 EXPORT_SYMBOL_GPL(ata_scsi_error);
4911 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4912 EXPORT_SYMBOL_GPL(ata_scsi_release);
4913 EXPORT_SYMBOL_GPL(ata_host_intr);
4914 EXPORT_SYMBOL_GPL(ata_id_string);
4915 EXPORT_SYMBOL_GPL(ata_id_c_string);
4916 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4917 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4918 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4919
4920 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4921 EXPORT_SYMBOL_GPL(ata_timing_compute);
4922 EXPORT_SYMBOL_GPL(ata_timing_merge);
4923
4924 #ifdef CONFIG_PCI
4925 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4926 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4927 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4928 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4929 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4930 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4931 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4932 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4933 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4934 #endif /* CONFIG_PCI */
4935
4936 EXPORT_SYMBOL_GPL(ata_device_suspend);
4937 EXPORT_SYMBOL_GPL(ata_device_resume);
4938 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4939 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);