[PATCH] libata: make ata_bus_probe() return negative errno on failure
drivers/scsi/libata-core.c
/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_dev_init_params(struct ata_port *ap,
                                        struct ata_device *dev,
                                        u16 heads,
                                        u16 sectors);
static void ata_set_mode(struct ata_port *ap);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
                                         struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;                  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
                                            bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command = fis[2];   /* status */
        tf->feature = fis[3];   /* error */

        tf->lbal = fis[4];
        tf->lbam = fis[5];
        tf->lbah = fis[6];
        tf->device = fis[7];

        tf->hob_lbal = fis[8];
        tf->hob_lbam = fis[9];
        tf->hob_lbah = fis[10];

        tf->nsect = fis[12];
        tf->hob_nsect = fis[13];
}

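/*
 * Editor's sketch (not part of the original file): the two helpers above are
 * inverses over the fields they share.  A round trip through the 20-byte
 * Register - Host to Device FIS preserves the command/LBA/count fields:
 *
 *      struct ata_taskfile tf2;
 *      u8 fis[20];
 *
 *      ata_tf_to_fis(&tf, fis, 0);     // pmp 0; fis[0] == 0x27
 *      ata_tf_from_fis(fis, &tf2);     // tf2 mirrors tf's LBA/nsect fields
 */
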
static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @qc: command to examine and configure
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct ata_device *dev = qc->dev;
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}

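/*
 * Editor's note (illustrative, not part of the original file): ata_rw_cmds[]
 * is indexed as (block base + fua + lba48 + write), with eight-entry blocks
 * for PIO-multi (0), PIO (8) and DMA (16).  For example:
 *
 *      ata_rw_cmds[16 + 4 + 2 + 1] == ATA_CMD_WRITE_FUA_EXT;  // DMA FUA write
 *      ata_rw_cmds[8 + 4 + 2 + 1] == 0;       // no PIO FUA command exists,
 *                                             // so ata_rwcmd_protocol()
 *                                             // returns -1 for that mix
 */
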
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
               ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
               ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

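/*
 * Editor's sketch (not part of the original file): packing and unpacking are
 * inverses as long as each input mask fits its bit-field.  The helper below
 * is hypothetical and exists only to illustrate the round trip:
 */
static void ata_xfermask_roundtrip_example(void)
{
        unsigned int xfer_mask, pio, mwdma, udma;

        xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f); /* PIO0-4, MWDMA0-2,
                                                            UDMA0-5 */
        ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
        WARN_ON(pio != 0x1f || mwdma != 0x07 || udma != 0x3f);
}
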
static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}

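/*
 * Editor's note (illustrative, not part of the original file): the three
 * helpers above relate mask-space and XFER_* mode numbers.  With XFER_UDMA_5
 * (UDMA/100), for instance:
 *
 *      ata_xfer_mode2mask(XFER_UDMA_5) == 1 << (ATA_SHIFT_UDMA + 5);
 *      ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA;
 *      ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 5)) == XFER_UDMA_5;
 */
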
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
        if (ata_dev_present(dev)) {
                printk(KERN_WARNING "ata%u: dev %u disabled\n",
                       ap->id, dev->devno);
                dev->class++;
        }
}

/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
                                   unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        outb(0xaa, ioaddr->nsect_addr);
        outb(0x55, ioaddr->lbal_addr);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        nsect = inb(ioaddr->nsect_addr);
        lbal = inb(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
                                    unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
        writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        nsect = readb((void __iomem *) ioaddr->nsect_addr);
        lbal = readb((void __iomem *) ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
                               unsigned int device)
{
        if (ap->flags & ATA_FLAG_MMIO)
                return ata_mmio_devchk(ap, device);
        return ata_pio_devchk(ap, device);
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags */
        if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}

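/*
 * Editor's sketch (not part of the original file): extracting the model
 * string from an IDENTIFY page with the helper above.  ATA_ID_PROD_OFS is
 * the standard word offset of the 40-byte model field:
 *
 *      unsigned char model[41];
 *
 *      ata_id_c_string(dev->id, model, ATA_ID_PROD_OFS, sizeof(model));
 *      // model is now NUL-terminated with trailing spaces trimmed
 */
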
static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}

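/*
 * Editor's note (illustrative, not part of the original file): the IDENTIFY
 * capacity words consulted above, in order of preference:
 *
 *      words 100-103   LBA48 max sectors (64-bit)
 *      words 60-61     LBA28 max sectors (32-bit)
 *      words 57-58     current CHS capacity, if valid
 *      words 1, 3, 6   default cylinders * heads * sectors
 */
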
/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
        } else {
                outb(tmp, ap->ioaddr.device_addr);
        }
        ata_pause(ap);          /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        VPRINTK("ENTER, ata%u: device %u, wait %u\n",
                ap->id, device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x\n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x\n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case.  Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum.  Turn it into
                 * a mask.
                 */
                pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

                /* But wait.. there's more.  Design your standards by
                 * committee and you too can get a free iordy field to
                 * process.  However, it's the speeds not the modes that
                 * are supported...  Note drivers using the timing API
                 * will get this right anyway.
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

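/*
 * Editor's sketch (not part of the original file): a typical use of the
 * helper above is to report the fastest mode a device advertises:
 *
 *      unsigned int xfer_mask = ata_id_xfermask(dev->id);
 *
 *      printk(KERN_DEBUG "max mode: %s\n", ata_mode_string(xfer_mask));
 */
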
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value to pass to workqueue function
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that
 * only one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_WORK(&ap->port_task, fn, data);

        if (!delay)
                rc = queue_work(ata_wq, &ap->port_task);
        else
                rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                DPRINTK("flush #2\n");
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        qc->ap->ops->tf_read(qc->ap, &qc->tf);
        complete(waiting);
}

/**
 * ata_exec_internal - execute libata internal command
 * @ap: Port to which the command is sent
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
                  struct ata_taskfile *tf,
                  int dma_dir, void *buf, unsigned int buflen)
{
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        DECLARE_COMPLETION(wait);
        unsigned long flags;
        unsigned int err_mask;

        spin_lock_irqsave(&ap->host_set->lock, flags);

        qc = ata_qc_new_init(ap, dev);
        BUG_ON(qc == NULL);

        qc->tf = *tf;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                ata_sg_init_one(qc, buf, buflen);
                qc->nsect = buflen / ATA_SECT_SIZE;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
                ata_port_flush_task(ap);

                spin_lock_irqsave(&ap->host_set->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * again.  If completion irq occurs after here but
                 * before the caller cleans up, it will result in a
                 * spurious interrupt.  We can live with that.
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask = AC_ERR_TIMEOUT;
                        ata_qc_complete(qc);
                        printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
                               ap->id, command);
                }

                spin_unlock_irqrestore(&ap->host_set->lock, flags);
        }

        *tf = qc->tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_PORT_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        return err_mask;
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY.  Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        int pio;
        int speed = adev->pio_mode - XFER_PIO_0;

        if (speed < 2)
                return 0;
        if (speed > 2)
                return 1;

        /* If we have no drive specific rule, then PIO 2 is non IORDY */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240ns per cycle */
                                return 1;
                        return 0;
                }
        }
        return 0;
}

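/*
 * Editor's note (illustrative, not part of the original file): the decision
 * above reduces to "PIO0/1 never need IORDY, PIO3+ always do, and PIO2 only
 * when the drive's advertised non-IORDY cycle time (id word 67) is slower
 * than the 240ns PIO2 cycle".  A controller driver would typically call:
 *
 *      if (ata_pio_need_iordy(adev))
 *              ...     // program the chip with IORDY sampling enabled
 */
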
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @ap: port on which target device resides
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @post_reset: is this read ID post-reset?
 * @p_id: read IDENTIFY page (newly allocated)
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
                           unsigned int *p_class, int post_reset, u16 **p_id)
{
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        u16 *id;
        const char *reason;
        int rc;

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

        id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
        if (id == NULL) {
                rc = -ENOMEM;
                reason = "out of memory";
                goto err_out;
        }

 retry:
        ata_tf_init(ap, &tf, dev->devno);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
                rc = -EINVAL;
                reason = "device reports illegal type";
                goto err_out;
        }

        if (post_reset && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed.  reread the identify device info.
                         */
                        post_reset = 0;
                        goto retry;
                }
        }

        *p_class = class;
        *p_id = id;
        return 0;

 err_out:
        printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
               ap->id, dev->devno, reason);
        kfree(id);
        return rc;
}

static inline u8 ata_dev_knobble(const struct ata_port *ap,
                                 struct ata_device *dev)
{
        return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @ap: Port on which target device resides
 * @dev: Target device to configure
 * @print_info: Enable device info printout
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
                             int print_info)
{
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        int i, rc;

        if (!ata_dev_present(dev)) {
                DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
                        ap->id, dev->devno);
                return 0;
        }

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        /* print device capabilities */
        if (print_info)
                printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
                       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
                       ap->id, dev->devno, id[49], id[82], id[83],
                       id[84], id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags = 0;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                dev->n_sectors = ata_id_n_sectors(id);

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                printk(KERN_INFO "ata%u: dev %u ATA-%d, "
                                       "max %s, %Lu sectors: %s\n",
                                       ap->id, dev->devno,
                                       ata_id_major_version(id),
                                       ata_mode_string(xfer_mask),
                                       (unsigned long long)dev->n_sectors,
                                       lba_desc);
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders = id[1];
                        dev->heads = id[3];
                        dev->sectors = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads = id[55];
                                dev->sectors = id[56];
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                printk(KERN_INFO "ata%u: dev %u ATA-%d, "
                                       "max %s, %Lu sectors: CHS %u/%u/%u\n",
                                       ap->id, dev->devno,
                                       ata_id_major_version(id),
                                       ata_mode_string(xfer_mask),
                                       (unsigned long long)dev->n_sectors,
                                       dev->cylinders, dev->heads, dev->sectors);
                }

                dev->cdb_len = 16;
        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                /* print device info to dmesg */
                if (print_info)
                        printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
                               ap->id, dev->devno, ata_mode_string(xfer_mask));
        }

        ap->host->max_cmd_len = 0;
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->host->max_cmd_len = max_t(unsigned int,
                                              ap->host->max_cmd_len,
                                              ap->device[i].cdb_len);

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(ap, dev)) {
                if (print_info)
                        printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
                               ap->id, dev->devno);
                dev->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ap->ops->dev_config)
                ap->ops->dev_config(ap, dev);

        DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
        return 0;

err_out_nosup:
        DPRINTK("EXIT, err\n");
        return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */

static int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        int i, rc, found = 0;

        ata_port_probe(ap);

        /* reset and determine device classes */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                classes[i] = ATA_DEV_UNKNOWN;

        if (ap->ops->probe_reset) {
                rc = ap->ops->probe_reset(ap, classes);
                if (rc) {
                        printk(KERN_ERR "ata%u: reset failed (errno=%d)\n",
                               ap->id, rc);
                        return rc;
                }
        } else {
                ap->ops->phy_reset(ap);

                if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
                        for (i = 0; i < ATA_MAX_DEVICES; i++)
                                classes[i] = ap->device[i].class;

                ata_port_probe(ap);
        }

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (classes[i] == ATA_DEV_UNKNOWN)
                        classes[i] = ATA_DEV_NONE;

        /* read IDENTIFY page and configure devices */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                dev->class = classes[i];

                if (!ata_dev_present(dev))
                        continue;

                WARN_ON(dev->id != NULL);
                if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
                        dev->class = ATA_DEV_NONE;
                        continue;
                }

                if (ata_dev_configure(ap, dev, 1)) {
                        ata_dev_disable(ap, dev);
                        continue;
                }

                found = 1;
        }

        if (!found)
                goto err_out_disable;

        if (ap->ops->set_mode)
                ap->ops->set_mode(ap);
        else
                ata_set_mode(ap);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                goto err_out_disable;

        return 0;

err_out_disable:
        ap->ops->port_disable(ap);
        return -ENODEV;
}

/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
        ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
        u32 sstatus, tmp;
        const char *speed;

        if (!ap->ops->scr_read)
                return;

        sstatus = scr_read(ap, SCR_STATUS);

        if (sata_dev_present(ap)) {
                tmp = (sstatus >> 4) & 0xf;
                if (tmp & (1 << 0))
                        speed = "1.5";
                else if (tmp & (1 << 1))
                        speed = "3.0";
                else
                        speed = "<unknown>";
                printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
                       ap->id, speed, sstatus);
        } else {
                printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
                       ap->id, sstatus);
        }
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
        u32 sstatus;
        unsigned long timeout = jiffies + (HZ * 5);

        if (ap->flags & ATA_FLAG_SATA_RESET) {
                /* issue phy wake/reset */
                scr_write_flush(ap, SCR_CONTROL, 0x301);
                /* Couldn't find anything in SATA I/II specs, but
                 * AHCI-1.1 10.4.2 says at least 1 ms. */
                mdelay(1);
        }
        scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

        /* wait for phy to become ready, if necessary */
        do {
                msleep(200);
                sstatus = scr_read(ap, SCR_STATUS);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        /* print link status */
        sata_print_link_status(ap);

        /* TODO: phy layer with polling, timeouts, etc. */
        if (sata_dev_present(ap))
                ata_port_probe(ap);
        else
                ata_port_disable(ap);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;

        if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                ata_port_disable(ap);
                return;
        }

        ap->cbl = ATA_CBL_SATA;
}

/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
        __sata_phy_reset(ap);
        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;
        ata_bus_reset(ap);
}

/**
 * ata_dev_pair - return other device on cable
 * @ap: port
 * @adev: device
 *
 * Obtain the other device on the same cable, or NULL if none is
 * present.
 */

struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
        struct ata_device *pair = &ap->device[1 - adev->devno];
        if (!ata_dev_present(pair))
                return NULL;
        return pair;
}

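/*
 * Editor's sketch (not part of the original file): a PATA driver can use
 * ata_dev_pair() to check whether a master/slave companion is present when
 * computing shared cable timings:
 *
 *      struct ata_device *pair = ata_dev_pair(ap, adev);
 *
 *      if (pair)
 *              ...     // clamp timings to what both devices tolerate
 */
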
/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
        ap->device[0].class = ATA_DEV_NONE;
        ap->device[1].class = ATA_DEV_NONE;
        ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

        { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
        { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
        { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
        { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

        { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
        { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
        { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*      { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

        { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
        { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
        { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

        { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
        { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
        { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*      { XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
        { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
        { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

        { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
        { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
        { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*      { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

        { 0xFF }
};

#define ENOUGH(v,unit)  (((v)-1)/(unit)+1)
#define EZ(v,unit)      ((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
        q->setup   = EZ(t->setup   * 1000,  T);
        q->act8b   = EZ(t->act8b   * 1000,  T);
        q->rec8b   = EZ(t->rec8b   * 1000,  T);
        q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
        q->active  = EZ(t->active  * 1000,  T);
        q->recover = EZ(t->recover * 1000,  T);
        q->cycle   = EZ(t->cycle   * 1000,  T);
        q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
                      struct ata_timing *m, unsigned int what)
{
        if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
        if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
        if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
        if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
        if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
        if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
        if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
        if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
        const struct ata_timing *t;

        for (t = ata_timing; t->mode != speed; t++)
                if (t->mode == 0xFF)
                        return NULL;
        return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
{
        const struct ata_timing *s;
        struct ata_timing p;

        /*
         * Find the mode.
         */

        if (!(s = ata_timing_find_mode(speed)))
                return -EINVAL;

        memcpy(t, s, sizeof(*s));

        /*
         * If the drive is an EIDE drive, it can tell us it needs extended
         * PIO/MW_DMA cycle timing.
         */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
                memset(&p, 0, sizeof(p));
                if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
                        if (speed <= XFER_PIO_2)
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
                        else
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
                } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
                        p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
                }
                ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
        }

        /*
         * Convert the timing to bus clock counts.
         */

        ata_timing_quantize(t, t, T, UT);

        /*
         * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
         * S.M.A.R.T. and some other commands.  We have to ensure that the
         * DMA cycle timing is slower than or equal to the fastest PIO timing.
         */

        if (speed > XFER_PIO_4) {
                ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
                ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
        }

        /*
         * Lengthen active & recovery time so that cycle time is correct.
         */

        if (t->act8b + t->rec8b < t->cyc8b) {
                t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
                t->rec8b = t->cyc8b - t->act8b;
        }

        if (t->active + t->recover < t->cycle) {
                t->active += (t->cycle - (t->active + t->recover)) / 2;
                t->recover = t->cycle - t->active;
        }

        return 0;
}

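/*
 * Editor's sketch (not part of the original file): a controller driver with
 * a 30ns bus clock (33 MHz) might convert MWDMA2 timings to clock counts
 * like this before programming its timing registers:
 *
 *      struct ata_timing t;
 *
 *      if (ata_timing_compute(adev, XFER_MW_DMA_2, &t, 30, 30) == 0)
 *              ...     // t.active/t.recover/t.cycle are now in clocks
 */
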
static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
        unsigned int err_mask;
        int rc;

        if (dev->xfer_shift == ATA_SHIFT_PIO)
                dev->flags |= ATA_DFLAG_PIO;

        err_mask = ata_dev_set_xfermode(ap, dev);
        if (err_mask) {
                printk(KERN_ERR
                       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
                       ap->id, err_mask);
                return -EIO;
        }

        rc = ata_dev_revalidate(ap, dev, 0);
        if (rc) {
                printk(KERN_ERR
                       "ata%u: failed to revalidate after set xfermode\n",
                       ap->id);
                return rc;
        }

        DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
                dev->xfer_shift, (int)dev->xfer_mode);

        printk(KERN_INFO "ata%u: dev %u configured for %s\n",
               ap->id, dev->devno,
               ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
        return 0;
}

static int ata_host_set_pio(struct ata_port *ap)
{
        int i;

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (!ata_dev_present(dev))
                        continue;

                if (!dev->pio_mode) {
                        printk(KERN_WARNING "ata%u: no PIO support for device %d.\n",
                               ap->id, i);
                        return -1;
                }

                dev->xfer_mode = dev->pio_mode;
                dev->xfer_shift = ATA_SHIFT_PIO;
                if (ap->ops->set_piomode)
                        ap->ops->set_piomode(ap, dev);
        }

        return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
        int i;

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (!ata_dev_present(dev) || !dev->dma_mode)
                        continue;

                dev->xfer_mode = dev->dma_mode;
                dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
                if (ap->ops->set_dmamode)
                        ap->ops->set_dmamode(ap, dev);
        }
}

/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 *
 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
        int i, rc, used_dma = 0;

        /* step 1: calculate xfer_mask */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];
                unsigned int pio_mask, dma_mask;

                if (!ata_dev_present(dev))
                        continue;

                ata_dev_xfermask(ap, dev);

                /* TODO: let LLDD filter dev->*_mask here */

                pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
                dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
                dev->pio_mode = ata_xfer_mask2mode(pio_mask);
                dev->dma_mode = ata_xfer_mask2mode(dma_mask);

                if (dev->dma_mode)
                        used_dma = 1;
        }

        /* step 2: always set host PIO timings */
        rc = ata_host_set_pio(ap);
        if (rc)
                goto err_out;

        /* step 3: set host DMA timings */
        ata_host_set_dma(ap);

        /* step 4: update devices' xfer mode */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (!ata_dev_present(dev))
                        continue;

                rc = ata_dev_set_mode(ap, dev);
                if (rc)
                        goto err_out;
        }

        /*
         * Record simplex status.  If we selected DMA then the other
         * host channels are not permitted to do so.
         */

        if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
                ap->host_set->simplex_claimed = 1;

        /*
         * Chip specific finalisation
         */
        if (ap->ops->post_set_mode)
                ap->ops->post_set_mode(ap);

        return;

err_out:
        ata_port_disable(ap);
}

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
                                  const struct ata_taskfile *tf)
{
        ap->ops->tf_load(ap, tf);
        ap->ops->exec_command(ap, tf);
}

/**
 * ata_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout
 * @tmout: overall timeout
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING: None.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
                             unsigned long tmout_pat, unsigned long tmout)
{
        unsigned long timer_start, timeout;
        u8 status;

        status = ata_busy_wait(ap, ATA_BUSY, 300);
        timer_start = jiffies;
        timeout = timer_start + tmout_pat;
        while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
                msleep(50);
                status = ata_busy_wait(ap, ATA_BUSY, 3);
        }

        if (status & ATA_BUSY)
                printk(KERN_WARNING "ata%u is slow to respond, "
                       "please be patient\n", ap->id);

        timeout = timer_start + tmout;
        while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
                msleep(50);
                status = ata_chk_status(ap);
        }

        if (status & ATA_BUSY) {
                printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
                       ap->id, tmout / HZ);
                return 1;
        }

        return 0;
}

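/*
 * Editor's sketch (not part of the original file): reset paths in this file
 * use the helper above with the boot timeouts, warning early but giving a
 * slow device the full window to clear BSY:
 *
 *      if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *              ...     // device never went non-busy; treat as failed
 */
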
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int dev0 = devmask & (1 << 0);
        unsigned int dev1 = devmask & (1 << 1);
        unsigned long timeout;

        /* if device 0 was found in ata_devchk, wait for its
         * BSY bit to clear
         */
        if (dev0)
                ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        /* if device 1 was found in ata_devchk, wait for
         * register access, then wait for BSY to clear
         */
        timeout = jiffies + ATA_TMOUT_BOOT;
        while (dev1) {
                u8 nsect, lbal;

                ap->ops->dev_select(ap, 1);
                if (ap->flags & ATA_FLAG_MMIO) {
                        nsect = readb((void __iomem *) ioaddr->nsect_addr);
                        lbal = readb((void __iomem *) ioaddr->lbal_addr);
                } else {
                        nsect = inb(ioaddr->nsect_addr);
                        lbal = inb(ioaddr->lbal_addr);
                }
                if ((nsect == 1) && (lbal == 1))
                        break;
                if (time_after(jiffies, timeout)) {
                        dev1 = 0;
                        break;
                }
                msleep(50);     /* give drive a breather */
        }
        if (dev1)
                ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        /* is all this really necessary? */
        ap->ops->dev_select(ap, 0);
        if (dev1)
                ap->ops->dev_select(ap, 1);
        if (dev0)
                ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
                                      unsigned int devmask)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        DPRINTK("ata%u: bus reset via SRST\n", ap->id);

        /* software reset.  causes dev0 to be selected */
        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
                udelay(20);     /* FIXME: flush */
                writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
                udelay(20);     /* FIXME: flush */
                writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
        } else {
                outb(ap->ctl, ioaddr->ctl_addr);
                udelay(10);
                outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
                udelay(10);
                outb(ap->ctl, ioaddr->ctl_addr);
        }

        /* spec mandates ">= 2ms" before checking status.
         * We wait 150ms, because that was the magic delay used for
         * ATAPI devices in Hale Landis's ATADRVR, for the period of time
         * between when the ATA command register is written, and then
         * status is checked.  Because waiting for "a while" before
         * checking status is fine, post SRST, we perform this magic
         * delay here as well.
         *
         * Old drivers/ide uses the 2 ms rule and then waits for ready.
         */
        msleep(150);

        /* Before we perform post reset processing we want to see if
         * the bus shows 0xFF because the odd clown forgets the D7
         * pulldown resistor.
         */
        if (ata_check_status(ap) == 0xFF)
                return AC_ERR_OTHER;

        ata_bus_post_reset(ap, devmask);

        return 0;
}
2041
2042 /**
2043 * ata_bus_reset - reset host port and associated ATA channel
2044 * @ap: port to reset
2045 *
2046 * This is typically the first time we actually start issuing
2047 * commands to the ATA channel. We wait for BSY to clear, then
2048 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2049 * result. Determine what devices, if any, are on the channel
2050 * by looking at the device 0/1 error register. Look at the signature
2051 * stored in each device's taskfile registers, to determine if
2052 * the device is ATA or ATAPI.
2053 *
2054 * LOCKING:
2055 * PCI/etc. bus probe sem.
2056 * Obtains host_set lock.
2057 *
2058 * SIDE EFFECTS:
2059 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2060 */
2061
2062 void ata_bus_reset(struct ata_port *ap)
2063 {
2064 struct ata_ioports *ioaddr = &ap->ioaddr;
2065 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2066 u8 err;
2067 unsigned int dev0, dev1 = 0, devmask = 0;
2068
2069 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2070
2071 /* determine if device 0/1 are present */
2072 if (ap->flags & ATA_FLAG_SATA_RESET)
2073 dev0 = 1;
2074 else {
2075 dev0 = ata_devchk(ap, 0);
2076 if (slave_possible)
2077 dev1 = ata_devchk(ap, 1);
2078 }
2079
2080 if (dev0)
2081 devmask |= (1 << 0);
2082 if (dev1)
2083 devmask |= (1 << 1);
2084
2085 /* select device 0 again */
2086 ap->ops->dev_select(ap, 0);
2087
2088 /* issue bus reset */
2089 if (ap->flags & ATA_FLAG_SRST)
2090 if (ata_bus_softreset(ap, devmask))
2091 goto err_out;
2092
2093 /*
2094 * determine by signature whether we have ATA or ATAPI devices
2095 */
2096 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
if (slave_possible && err != 0x81)
2098 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2099
2100 /* re-enable interrupts */
2101 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2102 ata_irq_on(ap);
2103
2104 /* is double-select really necessary? */
2105 if (ap->device[1].class != ATA_DEV_NONE)
2106 ap->ops->dev_select(ap, 1);
2107 if (ap->device[0].class != ATA_DEV_NONE)
2108 ap->ops->dev_select(ap, 0);
2109
2110 /* if no devices were detected, disable this port */
2111 if ((ap->device[0].class == ATA_DEV_NONE) &&
2112 (ap->device[1].class == ATA_DEV_NONE))
2113 goto err_out;
2114
2115 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2116 /* set up device control for ATA_FLAG_SATA_RESET */
2117 if (ap->flags & ATA_FLAG_MMIO)
2118 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2119 else
2120 outb(ap->ctl, ioaddr->ctl_addr);
2121 }
2122
2123 DPRINTK("EXIT\n");
2124 return;
2125
2126 err_out:
2127 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2128 ap->ops->port_disable(ap);
2129
2130 DPRINTK("EXIT\n");
2131 }
2132
2133 static int sata_phy_resume(struct ata_port *ap)
2134 {
2135 unsigned long timeout = jiffies + (HZ * 5);
2136 u32 sstatus;
2137
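/* Clear SControl DET (bits 3:0) while keeping IPM (bits 11:8) at 3,
 * i.e. partial/slumber power-management transitions disabled. An
 * SStatus DET value of 1 below means "device detected but phy
 * communication not established", so we poll until it changes.
 */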
2138 scr_write_flush(ap, SCR_CONTROL, 0x300);
2139
2140 /* Wait for phy to become ready, if necessary. */
2141 do {
2142 msleep(200);
2143 sstatus = scr_read(ap, SCR_STATUS);
2144 if ((sstatus & 0xf) != 1)
2145 return 0;
2146 } while (time_before(jiffies, timeout));
2147
2148 return -1;
2149 }
2150
2151 /**
2152 * ata_std_probeinit - initialize probing
2153 * @ap: port to be probed
2154 *
2155 * @ap is about to be probed. Initialize it. This function is
2156 * to be used as standard callback for ata_drive_probe_reset().
2157 *
2158 * NOTE!!! Do not use this function as probeinit if a low level
2159 * driver implements only hardreset. Just pass NULL as probeinit
2160 * in that case. Using this function is probably okay but doing
* so makes the reset sequence different from the original
2162 * ->phy_reset implementation and Jeff nervous. :-P
2163 */
2164 void ata_std_probeinit(struct ata_port *ap)
2165 {
2166 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2167 sata_phy_resume(ap);
2168 if (sata_dev_present(ap))
2169 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2170 }
2171 }
2172
2173 /**
2174 * ata_std_softreset - reset host port via ATA SRST
2175 * @ap: port to reset
2176 * @verbose: fail verbosely
2177 * @classes: resulting classes of attached devices
2178 *
2179 * Reset host port using ATA SRST. This function is to be used
2180 * as standard callback for ata_drive_*_reset() functions.
2181 *
2182 * LOCKING:
2183 * Kernel thread context (may sleep)
2184 *
2185 * RETURNS:
2186 * 0 on success, -errno otherwise.
2187 */
2188 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2189 {
2190 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2191 unsigned int devmask = 0, err_mask;
2192 u8 err;
2193
2194 DPRINTK("ENTER\n");
2195
2196 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2197 classes[0] = ATA_DEV_NONE;
2198 goto out;
2199 }
2200
2201 /* determine if device 0/1 are present */
2202 if (ata_devchk(ap, 0))
2203 devmask |= (1 << 0);
2204 if (slave_possible && ata_devchk(ap, 1))
2205 devmask |= (1 << 1);
2206
2207 /* select device 0 again */
2208 ap->ops->dev_select(ap, 0);
2209
2210 /* issue bus reset */
2211 DPRINTK("about to softreset, devmask=%x\n", devmask);
2212 err_mask = ata_bus_softreset(ap, devmask);
2213 if (err_mask) {
2214 if (verbose)
2215 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2216 ap->id, err_mask);
2217 else
2218 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2219 err_mask);
2220 return -EIO;
2221 }
2222
2223 /* determine by signature whether we have ATA or ATAPI devices */
2224 classes[0] = ata_dev_try_classify(ap, 0, &err);
2225 if (slave_possible && err != 0x81)
2226 classes[1] = ata_dev_try_classify(ap, 1, &err);
2227
2228 out:
2229 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2230 return 0;
2231 }
2232
2233 /**
2234 * sata_std_hardreset - reset host port via SATA phy reset
2235 * @ap: port to reset
2236 * @verbose: fail verbosely
2237 * @class: resulting class of attached device
2238 *
2239 * SATA phy-reset host port using DET bits of SControl register.
2240 * This function is to be used as standard callback for
2241 * ata_drive_*_reset().
2242 *
2243 * LOCKING:
2244 * Kernel thread context (may sleep)
2245 *
2246 * RETURNS:
2247 * 0 on success, -errno otherwise.
2248 */
2249 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2250 {
2251 DPRINTK("ENTER\n");
2252
/* Issue phy wake/reset: DET=1 in SControl requests COMRESET;
 * the 0x300 part keeps IPM at 3 (partial/slumber disabled).
 */
2254 scr_write_flush(ap, SCR_CONTROL, 0x301);
2255
2256 /*
2257 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2258 * 10.4.2 says at least 1 ms.
2259 */
2260 msleep(1);
2261
2262 /* Bring phy back */
2263 sata_phy_resume(ap);
2264
2265 /* TODO: phy layer with polling, timeouts, etc. */
2266 if (!sata_dev_present(ap)) {
2267 *class = ATA_DEV_NONE;
2268 DPRINTK("EXIT, link offline\n");
2269 return 0;
2270 }
2271
2272 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2273 if (verbose)
2274 printk(KERN_ERR "ata%u: COMRESET failed "
2275 "(device not ready)\n", ap->id);
2276 else
2277 DPRINTK("EXIT, device not ready\n");
2278 return -EIO;
2279 }
2280
2281 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2282
2283 *class = ata_dev_try_classify(ap, 0, NULL);
2284
2285 DPRINTK("EXIT, class=%u\n", *class);
2286 return 0;
2287 }
2288
2289 /**
2290 * ata_std_postreset - standard postreset callback
2291 * @ap: the target ata_port
2292 * @classes: classes of attached devices
2293 *
2294 * This function is invoked after a successful reset. Note that
2295 * the device might have been reset more than once using
2296 * different reset methods before postreset is invoked.
2297 *
2298 * This function is to be used as standard callback for
2299 * ata_drive_*_reset().
2300 *
2301 * LOCKING:
2302 * Kernel thread context (may sleep)
2303 */
2304 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2305 {
2306 DPRINTK("ENTER\n");
2307
2308 /* set cable type if it isn't already set */
2309 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2310 ap->cbl = ATA_CBL_SATA;
2311
2312 /* print link status */
2313 if (ap->cbl == ATA_CBL_SATA)
2314 sata_print_link_status(ap);
2315
2316 /* re-enable interrupts */
2317 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2318 ata_irq_on(ap);
2319
2320 /* is double-select really necessary? */
2321 if (classes[0] != ATA_DEV_NONE)
2322 ap->ops->dev_select(ap, 1);
2323 if (classes[1] != ATA_DEV_NONE)
2324 ap->ops->dev_select(ap, 0);
2325
2326 /* bail out if no device is present */
2327 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2328 DPRINTK("EXIT, no device\n");
2329 return;
2330 }
2331
2332 /* set up device control */
2333 if (ap->ioaddr.ctl_addr) {
2334 if (ap->flags & ATA_FLAG_MMIO)
2335 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2336 else
2337 outb(ap->ctl, ap->ioaddr.ctl_addr);
2338 }
2339
2340 DPRINTK("EXIT\n");
2341 }
2342
2343 /**
2344 * ata_std_probe_reset - standard probe reset method
* @ap: port to perform probe-reset on
2346 * @classes: resulting classes of attached devices
2347 *
2348 * The stock off-the-shelf ->probe_reset method.
2349 *
2350 * LOCKING:
2351 * Kernel thread context (may sleep)
2352 *
2353 * RETURNS:
2354 * 0 on success, -errno otherwise.
2355 */
2356 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2357 {
2358 ata_reset_fn_t hardreset;
2359
2360 hardreset = NULL;
2361 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2362 hardreset = sata_std_hardreset;
2363
2364 return ata_drive_probe_reset(ap, ata_std_probeinit,
2365 ata_std_softreset, hardreset,
2366 ata_std_postreset, classes);
2367 }
2368
2369 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2370 ata_postreset_fn_t postreset,
2371 unsigned int *classes)
2372 {
2373 int i, rc;
2374
2375 for (i = 0; i < ATA_MAX_DEVICES; i++)
2376 classes[i] = ATA_DEV_UNKNOWN;
2377
2378 rc = reset(ap, 0, classes);
2379 if (rc)
2380 return rc;
2381
/* If any class isn't ATA_DEV_UNKNOWN, consider classification
* complete and convert all remaining ATA_DEV_UNKNOWN entries to
2384 * ATA_DEV_NONE.
2385 */
2386 for (i = 0; i < ATA_MAX_DEVICES; i++)
2387 if (classes[i] != ATA_DEV_UNKNOWN)
2388 break;
2389
2390 if (i < ATA_MAX_DEVICES)
2391 for (i = 0; i < ATA_MAX_DEVICES; i++)
2392 if (classes[i] == ATA_DEV_UNKNOWN)
2393 classes[i] = ATA_DEV_NONE;
2394
2395 if (postreset)
2396 postreset(ap, classes);
2397
2398 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2399 }
2400
2401 /**
2402 * ata_drive_probe_reset - Perform probe reset with given methods
2403 * @ap: port to reset
2404 * @probeinit: probeinit method (can be NULL)
2405 * @softreset: softreset method (can be NULL)
2406 * @hardreset: hardreset method (can be NULL)
2407 * @postreset: postreset method (can be NULL)
2408 * @classes: resulting classes of attached devices
2409 *
2410 * Reset the specified port and classify attached devices using
2411 * given methods. This function prefers softreset but tries all
2412 * possible reset sequences to reset and classify devices. This
2413 * function is intended to be used for constructing ->probe_reset
2414 * callback by low level drivers.
2415 *
2416 * Reset methods should follow the following rules.
2417 *
* - Return 0 on success, -errno on failure.
2419 * - If classification is supported, fill classes[] with
2420 * recognized class codes.
2421 * - If classification is not supported, leave classes[] alone.
2422 * - If verbose is non-zero, print error message on failure;
2423 * otherwise, shut up.
2424 *
2425 * LOCKING:
2426 * Kernel thread context (may sleep)
2427 *
2428 * RETURNS:
* 0 on success, -EINVAL if no reset method is available, -ENODEV
2430 * if classification fails, and any error code from reset
2431 * methods.
2432 */
2433 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2434 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2435 ata_postreset_fn_t postreset, unsigned int *classes)
2436 {
2437 int rc = -EINVAL;
2438
2439 if (probeinit)
2440 probeinit(ap);
2441
2442 if (softreset) {
2443 rc = do_probe_reset(ap, softreset, postreset, classes);
2444 if (rc == 0)
2445 return 0;
2446 }
2447
2448 if (!hardreset)
2449 return rc;
2450
2451 rc = do_probe_reset(ap, hardreset, postreset, classes);
if (rc != -ENODEV)	/* rc == 0 or a non-retryable error */
2453 return rc;
2454
2455 if (softreset)
2456 rc = do_probe_reset(ap, softreset, postreset, classes);
2457
2458 return rc;
2459 }
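
/* An illustrative (hypothetical) ->probe_reset built on the helper
 * above, for a low level driver that implements only hardreset and
 * therefore passes NULL probeinit per the ata_std_probeinit() note:
 *
 *	static int my_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, NULL, NULL,
 *					     sata_std_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */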
2460
2461 /**
2462 * ata_dev_same_device - Determine whether new ID matches configured device
2463 * @ap: port on which the device to compare against resides
2464 * @dev: device to compare against
2465 * @new_class: class of the new device
2466 * @new_id: IDENTIFY page of the new device
2467 *
2468 * Compare @new_class and @new_id against @dev and determine
2469 * whether @dev is the device indicated by @new_class and
2470 * @new_id.
2471 *
2472 * LOCKING:
2473 * None.
2474 *
2475 * RETURNS:
2476 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2477 */
2478 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2479 unsigned int new_class, const u16 *new_id)
2480 {
2481 const u16 *old_id = dev->id;
2482 unsigned char model[2][41], serial[2][21];
2483 u64 new_n_sectors;
2484
2485 if (dev->class != new_class) {
2486 printk(KERN_INFO
2487 "ata%u: dev %u class mismatch %d != %d\n",
2488 ap->id, dev->devno, dev->class, new_class);
2489 return 0;
2490 }
2491
2492 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2493 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2494 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2495 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2496 new_n_sectors = ata_id_n_sectors(new_id);
2497
2498 if (strcmp(model[0], model[1])) {
2499 printk(KERN_INFO
2500 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2501 ap->id, dev->devno, model[0], model[1]);
2502 return 0;
2503 }
2504
2505 if (strcmp(serial[0], serial[1])) {
2506 printk(KERN_INFO
2507 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2508 ap->id, dev->devno, serial[0], serial[1]);
2509 return 0;
2510 }
2511
2512 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2513 printk(KERN_INFO
2514 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2515 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2516 (unsigned long long)new_n_sectors);
2517 return 0;
2518 }
2519
2520 return 1;
2521 }
2522
2523 /**
2524 * ata_dev_revalidate - Revalidate ATA device
2525 * @ap: port on which the device to revalidate resides
2526 * @dev: device to revalidate
2527 * @post_reset: is this revalidation after reset?
2528 *
2529 * Re-read IDENTIFY page and make sure @dev is still attached to
2530 * the port.
2531 *
2532 * LOCKING:
2533 * Kernel thread context (may sleep)
2534 *
2535 * RETURNS:
2536 * 0 on success, negative errno otherwise
2537 */
2538 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2539 int post_reset)
2540 {
2541 unsigned int class;
2542 u16 *id;
2543 int rc;
2544
2545 if (!ata_dev_present(dev))
2546 return -ENODEV;
2547
2548 class = dev->class;
2549 id = NULL;
2550
2551 /* allocate & read ID data */
2552 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2553 if (rc)
2554 goto fail;
2555
2556 /* is the device still there? */
2557 if (!ata_dev_same_device(ap, dev, class, id)) {
2558 rc = -ENODEV;
2559 goto fail;
2560 }
2561
2562 kfree(dev->id);
2563 dev->id = id;
2564
2565 /* configure device according to the new ID */
2566 return ata_dev_configure(ap, dev, 0);
2567
2568 fail:
2569 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2570 ap->id, dev->devno, rc);
2571 kfree(id);
2572 return rc;
2573 }
2574
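/* Laid out as { model, firmware-rev } pairs; a NULL revision entry
 * blacklists every revision of that model (see ata_dma_blacklisted()
 * below).
 */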
2575 static const char * const ata_dma_blacklist [] = {
2576 "WDC AC11000H", NULL,
2577 "WDC AC22100H", NULL,
2578 "WDC AC32500H", NULL,
2579 "WDC AC33100H", NULL,
2580 "WDC AC31600H", NULL,
2581 "WDC AC32100H", "24.09P07",
2582 "WDC AC23200L", "21.10N21",
2583 "Compaq CRD-8241B", NULL,
2584 "CRD-8400B", NULL,
2585 "CRD-8480B", NULL,
2586 "CRD-8482B", NULL,
2587 "CRD-84", NULL,
2588 "SanDisk SDP3B", NULL,
2589 "SanDisk SDP3B-64", NULL,
2590 "SANYO CD-ROM CRD", NULL,
2591 "HITACHI CDR-8", NULL,
2592 "HITACHI CDR-8335", NULL,
2593 "HITACHI CDR-8435", NULL,
2594 "Toshiba CD-ROM XM-6202B", NULL,
2595 "TOSHIBA CD-ROM XM-1702BC", NULL,
2596 "CD-532E-A", NULL,
2597 "E-IDE CD-ROM CR-840", NULL,
2598 "CD-ROM Drive/F5A", NULL,
2599 "WPI CDD-820", NULL,
2600 "SAMSUNG CD-ROM SC-148C", NULL,
2601 "SAMSUNG CD-ROM SC", NULL,
2602 "SanDisk SDP3B-64", NULL,
2603 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2604 "_NEC DV5800A", NULL,
2605 "SAMSUNG CD-ROM SN-124", "N001"
2606 };
2607
2608 static int ata_strim(char *s, size_t len)
2609 {
2610 len = strnlen(s, len);
2611
2612 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2613 while ((len > 0) && (s[len - 1] == ' ')) {
2614 len--;
2615 s[len] = 0;
2616 }
2617 return len;
2618 }
2619
2620 static int ata_dma_blacklisted(const struct ata_device *dev)
2621 {
2622 unsigned char model_num[40];
2623 unsigned char model_rev[16];
2624 unsigned int nlen, rlen;
2625 int i;
2626
2627 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2628 sizeof(model_num));
2629 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2630 sizeof(model_rev));
2631 nlen = ata_strim(model_num, sizeof(model_num));
2632 rlen = ata_strim(model_rev, sizeof(model_rev));
2633
2634 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2635 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2636 if (ata_dma_blacklist[i+1] == NULL)
2637 return 1;
if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2639 return 1;
2640 }
2641 }
2642 return 0;
2643 }
2644
2645 /**
2646 * ata_dev_xfermask - Compute supported xfermask of the given device
2647 * @ap: Port on which the device to compute xfermask for resides
2648 * @dev: Device to compute xfermask for
2649 *
2650 * Compute supported xfermask of @dev and store it in
2651 * dev->*_mask. This function is responsible for applying all
2652 * known limits including host controller limits, device
2653 * blacklist, etc...
2654 *
2655 * FIXME: The current implementation limits all transfer modes to
* the fastest mode of the slowest device on the port. This is not
2657 * required on most controllers.
2658 *
2659 * LOCKING:
2660 * None.
2661 */
2662 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2663 {
2664 struct ata_host_set *hs = ap->host_set;
2665 unsigned long xfer_mask;
2666 int i;
2667
2668 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2669 ap->udma_mask);
2670
2671 /* FIXME: Use port-wide xfermask for now */
2672 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2673 struct ata_device *d = &ap->device[i];
2674 if (!ata_dev_present(d))
2675 continue;
2676 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2677 d->udma_mask);
2678 xfer_mask &= ata_id_xfermask(d->id);
2679 if (ata_dma_blacklisted(d))
2680 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2681 /* Apply cable rule here. Don't apply it early because when
2682 we handle hot plug the cable type can itself change */
2683 if (ap->cbl == ATA_CBL_PATA40)
2684 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2685 }
2686
2687 if (ata_dma_blacklisted(dev))
2688 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2689 "disabling DMA\n", ap->id, dev->devno);
2690
2691 if (hs->flags & ATA_HOST_SIMPLEX) {
2692 if (hs->simplex_claimed)
2693 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2694 }
2695 if (ap->ops->mode_filter)
2696 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2697
2698 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2699 &dev->udma_mask);
2700 }
2701
2702 /**
2703 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2704 * @ap: Port associated with device @dev
2705 * @dev: Device to which command will be sent
2706 *
2707 * Issue SET FEATURES - XFER MODE command to device @dev
2708 * on port @ap.
2709 *
2710 * LOCKING:
2711 * PCI/etc. bus probe sem.
2712 *
2713 * RETURNS:
2714 * 0 on success, AC_ERR_* mask otherwise.
2715 */
2716
2717 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2718 struct ata_device *dev)
2719 {
2720 struct ata_taskfile tf;
2721 unsigned int err_mask;
2722
2723 /* set up set-features taskfile */
2724 DPRINTK("set features - xfer mode\n");
2725
2726 ata_tf_init(ap, &tf, dev->devno);
2727 tf.command = ATA_CMD_SET_FEATURES;
2728 tf.feature = SETFEATURES_XFER;
2729 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2730 tf.protocol = ATA_PROT_NODATA;
2731 tf.nsect = dev->xfer_mode;
2732
2733 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2734
2735 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2736 return err_mask;
2737 }
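
/* The xfer_mode value placed in tf.nsect uses the SET FEATURES
 * subcommand encoding from the ATA spec: e.g. 0x0c selects PIO mode 4,
 * 0x22 selects MWDMA mode 2 and 0x45 selects UDMA mode 5 (bases 0x08,
 * 0x20 and 0x40 plus the mode number).
 */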
2738
2739 /**
2740 * ata_dev_init_params - Issue INIT DEV PARAMS command
2741 * @ap: Port associated with device @dev
2742 * @dev: Device to which command will be sent
2743 *
2744 * LOCKING:
2745 * Kernel thread context (may sleep)
2746 *
2747 * RETURNS:
2748 * 0 on success, AC_ERR_* mask otherwise.
2749 */
2750
2751 static unsigned int ata_dev_init_params(struct ata_port *ap,
2752 struct ata_device *dev,
2753 u16 heads,
2754 u16 sectors)
2755 {
2756 struct ata_taskfile tf;
2757 unsigned int err_mask;
2758
2759 /* Number of sectors per track 1-255. Number of heads 1-16 */
2760 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2761 return AC_ERR_INVALID;
2762
2763 /* set up init dev params taskfile */
DPRINTK("init dev params\n");
2765
2766 ata_tf_init(ap, &tf, dev->devno);
2767 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2768 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2769 tf.protocol = ATA_PROT_NODATA;
2770 tf.nsect = sectors;
2771 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2772
2773 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2774
2775 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2776 return err_mask;
2777 }
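
/* Worked example: a legacy CHS drive reporting 16 heads and 63
 * sectors per track gets tf.nsect = 63 and a tf.device low nibble
 * of 0x0f (max head = 16 - 1), which is what INITIALIZE DEVICE
 * PARAMETERS expects.
 */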
2778
2779 /**
2780 * ata_sg_clean - Unmap DMA memory associated with command
2781 * @qc: Command containing DMA memory to be released
2782 *
2783 * Unmap all mapped DMA memory associated with this command.
2784 *
2785 * LOCKING:
2786 * spin_lock_irqsave(host_set lock)
2787 */
2788
2789 static void ata_sg_clean(struct ata_queued_cmd *qc)
2790 {
2791 struct ata_port *ap = qc->ap;
2792 struct scatterlist *sg = qc->__sg;
2793 int dir = qc->dma_dir;
2794 void *pad_buf = NULL;
2795
2796 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2797 WARN_ON(sg == NULL);
2798
2799 if (qc->flags & ATA_QCFLAG_SINGLE)
2800 WARN_ON(qc->n_elem > 1);
2801
2802 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2803
2804 /* if we padded the buffer out to 32-bit bound, and data
2805 * xfer direction is from-device, we must copy from the
2806 * pad buffer back into the supplied buffer
2807 */
2808 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2809 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2810
2811 if (qc->flags & ATA_QCFLAG_SG) {
2812 if (qc->n_elem)
2813 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2814 /* restore last sg */
2815 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2816 if (pad_buf) {
2817 struct scatterlist *psg = &qc->pad_sgent;
2818 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2819 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2820 kunmap_atomic(addr, KM_IRQ0);
2821 }
2822 } else {
2823 if (qc->n_elem)
2824 dma_unmap_single(ap->dev,
2825 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2826 dir);
2827 /* restore sg */
2828 sg->length += qc->pad_len;
2829 if (pad_buf)
2830 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2831 pad_buf, qc->pad_len);
2832 }
2833
2834 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2835 qc->__sg = NULL;
2836 }
2837
2838 /**
2839 * ata_fill_sg - Fill PCI IDE PRD table
2840 * @qc: Metadata associated with taskfile to be transferred
2841 *
2842 * Fill PCI IDE PRD (scatter-gather) table with segments
2843 * associated with the current disk command.
2844 *
2845 * LOCKING:
2846 * spin_lock_irqsave(host_set lock)
2847 *
2848 */
2849 static void ata_fill_sg(struct ata_queued_cmd *qc)
2850 {
2851 struct ata_port *ap = qc->ap;
2852 struct scatterlist *sg;
2853 unsigned int idx;
2854
2855 WARN_ON(qc->__sg == NULL);
2856 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2857
2858 idx = 0;
2859 ata_for_each_sg(sg, qc) {
2860 u32 addr, offset;
2861 u32 sg_len, len;
2862
2863 /* determine if physical DMA addr spans 64K boundary.
2864 * Note h/w doesn't support 64-bit, so we unconditionally
2865 * truncate dma_addr_t to u32.
2866 */
2867 addr = (u32) sg_dma_address(sg);
2868 sg_len = sg_dma_len(sg);
2869
2870 while (sg_len) {
2871 offset = addr & 0xffff;
2872 len = sg_len;
2873 if ((offset + sg_len) > 0x10000)
2874 len = 0x10000 - offset;
2875
2876 ap->prd[idx].addr = cpu_to_le32(addr);
2877 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2878 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2879
2880 idx++;
2881 sg_len -= len;
2882 addr += len;
2883 }
2884 }
2885
2886 if (idx)
2887 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2888 }
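
/* Worked example: a 0x11000-byte segment at bus address 0x0001f000
 * crosses a 64KB boundary and is split into two PRD entries:
 *
 *	PRD[0] = (0x0001f000, 0x1000)	up to the 64KB boundary
 *	PRD[1] = (0x00020000, 0x10000)	the remainder
 *
 * 0x10000 & 0xffff == 0, and a zero length field means 64KB by PCI
 * IDE BMDMA convention; ATA_PRD_EOT is then set on PRD[1].
 */
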
2889 /**
2890 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2891 * @qc: Metadata associated with taskfile to check
2892 *
2893 * Allow low-level driver to filter ATA PACKET commands, returning
2894 * a status indicating whether or not it is OK to use DMA for the
2895 * supplied PACKET command.
2896 *
2897 * LOCKING:
2898 * spin_lock_irqsave(host_set lock)
2899 *
2900 * RETURNS: 0 when ATAPI DMA can be used
2901 * nonzero otherwise
2902 */
2903 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2904 {
2905 struct ata_port *ap = qc->ap;
2906 int rc = 0; /* Assume ATAPI DMA is OK by default */
2907
2908 if (ap->ops->check_atapi_dma)
2909 rc = ap->ops->check_atapi_dma(qc);
2910
2911 return rc;
}

2913 /**
2914 * ata_qc_prep - Prepare taskfile for submission
2915 * @qc: Metadata associated with taskfile to be prepared
2916 *
2917 * Prepare ATA taskfile for submission.
2918 *
2919 * LOCKING:
2920 * spin_lock_irqsave(host_set lock)
2921 */
2922 void ata_qc_prep(struct ata_queued_cmd *qc)
2923 {
2924 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2925 return;
2926
2927 ata_fill_sg(qc);
2928 }
2929
2930 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2931
2932 /**
2933 * ata_sg_init_one - Associate command with memory buffer
2934 * @qc: Command to be associated
2935 * @buf: Memory buffer
2936 * @buflen: Length of memory buffer, in bytes.
2937 *
2938 * Initialize the data-related elements of queued_cmd @qc
2939 * to point to a single memory buffer, @buf of byte length @buflen.
2940 *
2941 * LOCKING:
2942 * spin_lock_irqsave(host_set lock)
2943 */
2944
2945 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2946 {
2947 struct scatterlist *sg;
2948
2949 qc->flags |= ATA_QCFLAG_SINGLE;
2950
2951 memset(&qc->sgent, 0, sizeof(qc->sgent));
2952 qc->__sg = &qc->sgent;
2953 qc->n_elem = 1;
2954 qc->orig_n_elem = 1;
2955 qc->buf_virt = buf;
2956
2957 sg = qc->__sg;
2958 sg_init_one(sg, buf, buflen);
2959 }
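
/* Illustrative use (hypothetical caller): attach a single DMA-able
 * response buffer to a freshly allocated command before issuing it:
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(ap, dev);
 *
 *	if (qc) {
 *		ata_sg_init_one(qc, buf, ATA_SECT_SIZE);
 *		qc->dma_dir = DMA_FROM_DEVICE;
 *	}
 */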
2960
2961 /**
2962 * ata_sg_init - Associate command with scatter-gather table.
2963 * @qc: Command to be associated
2964 * @sg: Scatter-gather table.
2965 * @n_elem: Number of elements in s/g table.
2966 *
2967 * Initialize the data-related elements of queued_cmd @qc
2968 * to point to a scatter-gather table @sg, containing @n_elem
2969 * elements.
2970 *
2971 * LOCKING:
2972 * spin_lock_irqsave(host_set lock)
2973 */
2974
2975 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2976 unsigned int n_elem)
2977 {
2978 qc->flags |= ATA_QCFLAG_SG;
2979 qc->__sg = sg;
2980 qc->n_elem = n_elem;
2981 qc->orig_n_elem = n_elem;
2982 }
2983
2984 /**
2985 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2986 * @qc: Command with memory buffer to be mapped.
2987 *
2988 * DMA-map the memory buffer associated with queued_cmd @qc.
2989 *
2990 * LOCKING:
2991 * spin_lock_irqsave(host_set lock)
2992 *
2993 * RETURNS:
2994 * Zero on success, negative on error.
2995 */
2996
2997 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2998 {
2999 struct ata_port *ap = qc->ap;
3000 int dir = qc->dma_dir;
3001 struct scatterlist *sg = qc->__sg;
3002 dma_addr_t dma_address;
3003 int trim_sg = 0;
3004
3005 /* we must lengthen transfers to end on a 32-bit boundary */
3006 qc->pad_len = sg->length & 3;
3007 if (qc->pad_len) {
3008 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3009 struct scatterlist *psg = &qc->pad_sgent;
3010
3011 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3012
3013 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3014
3015 if (qc->tf.flags & ATA_TFLAG_WRITE)
3016 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3017 qc->pad_len);
3018
3019 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3020 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3021 /* trim sg */
3022 sg->length -= qc->pad_len;
3023 if (sg->length == 0)
3024 trim_sg = 1;
3025
3026 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3027 sg->length, qc->pad_len);
3028 }
3029
3030 if (trim_sg) {
3031 qc->n_elem--;
3032 goto skip_map;
3033 }
3034
3035 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3036 sg->length, dir);
3037 if (dma_mapping_error(dma_address)) {
3038 /* restore sg */
3039 sg->length += qc->pad_len;
3040 return -1;
3041 }
3042
3043 sg_dma_address(sg) = dma_address;
3044 sg_dma_len(sg) = sg->length;
3045
3046 skip_map:
3047 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3048 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3049
3050 return 0;
3051 }
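
/* Example of the padding rule above: a 510-byte buffer gives
 * pad_len = 510 & 3 = 2, the sg entry is trimmed to 508 bytes and
 * the trailing 2 bytes travel in the zero-filled 4-byte pad buffer,
 * so the device sees 508 + 4 = 512 bytes, a 32-bit multiple.
 */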
3052
3053 /**
3054 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3055 * @qc: Command with scatter-gather table to be mapped.
3056 *
3057 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3058 *
3059 * LOCKING:
3060 * spin_lock_irqsave(host_set lock)
3061 *
3062 * RETURNS:
3063 * Zero on success, negative on error.
3064 *
3065 */
3066
3067 static int ata_sg_setup(struct ata_queued_cmd *qc)
3068 {
3069 struct ata_port *ap = qc->ap;
3070 struct scatterlist *sg = qc->__sg;
3071 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3072 int n_elem, pre_n_elem, dir, trim_sg = 0;
3073
3074 VPRINTK("ENTER, ata%u\n", ap->id);
3075 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3076
3077 /* we must lengthen transfers to end on a 32-bit boundary */
3078 qc->pad_len = lsg->length & 3;
3079 if (qc->pad_len) {
3080 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3081 struct scatterlist *psg = &qc->pad_sgent;
3082 unsigned int offset;
3083
3084 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3085
3086 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3087
3088 /*
3089 * psg->page/offset are used to copy to-be-written
3090 * data in this function or read data in ata_sg_clean.
3091 */
3092 offset = lsg->offset + lsg->length - qc->pad_len;
3093 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3094 psg->offset = offset_in_page(offset);
3095
3096 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3097 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3098 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3099 kunmap_atomic(addr, KM_IRQ0);
3100 }
3101
3102 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3103 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3104 /* trim last sg */
3105 lsg->length -= qc->pad_len;
3106 if (lsg->length == 0)
3107 trim_sg = 1;
3108
3109 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3110 qc->n_elem - 1, lsg->length, qc->pad_len);
3111 }
3112
3113 pre_n_elem = qc->n_elem;
3114 if (trim_sg && pre_n_elem)
3115 pre_n_elem--;
3116
3117 if (!pre_n_elem) {
3118 n_elem = 0;
3119 goto skip_map;
3120 }
3121
3122 dir = qc->dma_dir;
3123 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3124 if (n_elem < 1) {
3125 /* restore last sg */
3126 lsg->length += qc->pad_len;
3127 return -1;
3128 }
3129
3130 DPRINTK("%d sg elements mapped\n", n_elem);
3131
3132 skip_map:
3133 qc->n_elem = n_elem;
3134
3135 return 0;
3136 }
3137
3138 /**
3139 * ata_poll_qc_complete - turn irq back on and finish qc
3140 * @qc: Command to complete
3142 *
3143 * LOCKING:
3144 * None. (grabs host lock)
3145 */
3146
3147 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3148 {
3149 struct ata_port *ap = qc->ap;
3150 unsigned long flags;
3151
3152 spin_lock_irqsave(&ap->host_set->lock, flags);
3153 ap->flags &= ~ATA_FLAG_NOINTR;
3154 ata_irq_on(ap);
3155 ata_qc_complete(qc);
3156 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3157 }
3158
3159 /**
3160 * ata_pio_poll - poll using PIO, depending on current state
3161 * @ap: the target ata_port
3162 *
3163 * LOCKING:
3164 * None. (executing in kernel thread context)
3165 *
3166 * RETURNS:
3167 * timeout value to use
3168 */
3169
3170 static unsigned long ata_pio_poll(struct ata_port *ap)
3171 {
3172 struct ata_queued_cmd *qc;
3173 u8 status;
3174 unsigned int poll_state = HSM_ST_UNKNOWN;
3175 unsigned int reg_state = HSM_ST_UNKNOWN;
3176
3177 qc = ata_qc_from_tag(ap, ap->active_tag);
3178 WARN_ON(qc == NULL);
3179
3180 switch (ap->hsm_task_state) {
3181 case HSM_ST:
3182 case HSM_ST_POLL:
3183 poll_state = HSM_ST_POLL;
3184 reg_state = HSM_ST;
3185 break;
3186 case HSM_ST_LAST:
3187 case HSM_ST_LAST_POLL:
3188 poll_state = HSM_ST_LAST_POLL;
3189 reg_state = HSM_ST_LAST;
3190 break;
3191 default:
3192 BUG();
3193 break;
3194 }
3195
3196 status = ata_chk_status(ap);
3197 if (status & ATA_BUSY) {
3198 if (time_after(jiffies, ap->pio_task_timeout)) {
3199 qc->err_mask |= AC_ERR_TIMEOUT;
3200 ap->hsm_task_state = HSM_ST_TMOUT;
3201 return 0;
3202 }
3203 ap->hsm_task_state = poll_state;
3204 return ATA_SHORT_PAUSE;
3205 }
3206
3207 ap->hsm_task_state = reg_state;
3208 return 0;
3209 }
3210
3211 /**
3212 * ata_pio_complete - check if drive is busy or idle
3213 * @ap: the target ata_port
3214 *
3215 * LOCKING:
3216 * None. (executing in kernel thread context)
3217 *
3218 * RETURNS:
3219 * Non-zero if qc completed, zero otherwise.
3220 */
3221
3222 static int ata_pio_complete (struct ata_port *ap)
3223 {
3224 struct ata_queued_cmd *qc;
3225 u8 drv_stat;
3226
3227 /*
3228 * This is purely heuristic. This is a fast path. Sometimes when
3229 * we enter, BSY will be cleared in a chk-status or two. If not,
3230 * the drive is probably seeking or something. Snooze for a couple
3231 * msecs, then chk-status again. If still busy, fall back to
3232 * HSM_ST_POLL state.
3233 */
3234 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3235 if (drv_stat & ATA_BUSY) {
3236 msleep(2);
3237 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3238 if (drv_stat & ATA_BUSY) {
3239 ap->hsm_task_state = HSM_ST_LAST_POLL;
3240 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3241 return 0;
3242 }
3243 }
3244
3245 qc = ata_qc_from_tag(ap, ap->active_tag);
3246 WARN_ON(qc == NULL);
3247
3248 drv_stat = ata_wait_idle(ap);
3249 if (!ata_ok(drv_stat)) {
3250 qc->err_mask |= __ac_err_mask(drv_stat);
3251 ap->hsm_task_state = HSM_ST_ERR;
3252 return 0;
3253 }
3254
3255 ap->hsm_task_state = HSM_ST_IDLE;
3256
3257 WARN_ON(qc->err_mask);
3258 ata_poll_qc_complete(qc);
3259
3260 /* another command may start at this point */
3261
3262 return 1;
3263 }
3264
3265
3266 /**
3267 * swap_buf_le16 - swap halves of 16-bit words in place
3268 * @buf: Buffer to swap
3269 * @buf_words: Number of 16-bit words in buffer.
3270 *
3271 * Swap halves of 16-bit words if needed to convert from
3272 * little-endian byte order to native cpu byte order, or
3273 * vice-versa.
3274 *
3275 * LOCKING:
3276 * Inherited from caller.
3277 */
3278 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3279 {
3280 #ifdef __BIG_ENDIAN
3281 unsigned int i;
3282
3283 for (i = 0; i < buf_words; i++)
3284 buf[i] = le16_to_cpu(buf[i]);
3285 #endif /* __BIG_ENDIAN */
3286 }
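
/* Typical use: IDENTIFY DEVICE data arrives as 256 little-endian
 * words, so callers invoke swap_buf_le16(id, ATA_ID_WORDS) after
 * reading it; on little-endian machines this compiles to a no-op.
 */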
3287
3288 /**
3289 * ata_mmio_data_xfer - Transfer data by MMIO
3290 * @ap: port to read/write
3291 * @buf: data buffer
3292 * @buflen: buffer length
3293 * @write_data: read/write
3294 *
3295 * Transfer data from/to the device data register by MMIO.
3296 *
3297 * LOCKING:
3298 * Inherited from caller.
3299 */
3300
3301 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3302 unsigned int buflen, int write_data)
3303 {
3304 unsigned int i;
3305 unsigned int words = buflen >> 1;
3306 u16 *buf16 = (u16 *) buf;
3307 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3308
3309 /* Transfer multiple of 2 bytes */
3310 if (write_data) {
3311 for (i = 0; i < words; i++)
3312 writew(le16_to_cpu(buf16[i]), mmio);
3313 } else {
3314 for (i = 0; i < words; i++)
3315 buf16[i] = cpu_to_le16(readw(mmio));
3316 }
3317
3318 /* Transfer trailing 1 byte, if any. */
3319 if (unlikely(buflen & 0x01)) {
3320 u16 align_buf[1] = { 0 };
3321 unsigned char *trailing_buf = buf + buflen - 1;
3322
3323 if (write_data) {
3324 memcpy(align_buf, trailing_buf, 1);
3325 writew(le16_to_cpu(align_buf[0]), mmio);
3326 } else {
3327 align_buf[0] = cpu_to_le16(readw(mmio));
3328 memcpy(trailing_buf, align_buf, 1);
3329 }
3330 }
3331 }
3332
3333 /**
3334 * ata_pio_data_xfer - Transfer data by PIO
3335 * @ap: port to read/write
3336 * @buf: data buffer
3337 * @buflen: buffer length
3338 * @write_data: read/write
3339 *
3340 * Transfer data from/to the device data register by PIO.
3341 *
3342 * LOCKING:
3343 * Inherited from caller.
3344 */
3345
3346 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3347 unsigned int buflen, int write_data)
3348 {
3349 unsigned int words = buflen >> 1;
3350
3351 /* Transfer multiple of 2 bytes */
3352 if (write_data)
3353 outsw(ap->ioaddr.data_addr, buf, words);
3354 else
3355 insw(ap->ioaddr.data_addr, buf, words);
3356
3357 /* Transfer trailing 1 byte, if any. */
3358 if (unlikely(buflen & 0x01)) {
3359 u16 align_buf[1] = { 0 };
3360 unsigned char *trailing_buf = buf + buflen - 1;
3361
3362 if (write_data) {
3363 memcpy(align_buf, trailing_buf, 1);
3364 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3365 } else {
3366 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3367 memcpy(trailing_buf, align_buf, 1);
3368 }
3369 }
3370 }
3371
3372 /**
3373 * ata_data_xfer - Transfer data from/to the data register.
3374 * @ap: port to read/write
3375 * @buf: data buffer
3376 * @buflen: buffer length
3377 * @do_write: read/write
3378 *
3379 * Transfer data from/to the device data register.
3380 *
3381 * LOCKING:
3382 * Inherited from caller.
3383 */
3384
3385 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3386 unsigned int buflen, int do_write)
3387 {
/* Make the crap hardware pay the costs, not the good stuff */
3389 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3390 unsigned long flags;
3391 local_irq_save(flags);
3392 if (ap->flags & ATA_FLAG_MMIO)
3393 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3394 else
3395 ata_pio_data_xfer(ap, buf, buflen, do_write);
3396 local_irq_restore(flags);
3397 } else {
3398 if (ap->flags & ATA_FLAG_MMIO)
3399 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3400 else
3401 ata_pio_data_xfer(ap, buf, buflen, do_write);
3402 }
3403 }
3404
3405 /**
3406 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3407 * @qc: Command on going
3408 *
3409 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3410 *
3411 * LOCKING:
3412 * Inherited from caller.
3413 */
3414
3415 static void ata_pio_sector(struct ata_queued_cmd *qc)
3416 {
3417 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3418 struct scatterlist *sg = qc->__sg;
3419 struct ata_port *ap = qc->ap;
3420 struct page *page;
3421 unsigned int offset;
3422 unsigned char *buf;
3423
3424 if (qc->cursect == (qc->nsect - 1))
3425 ap->hsm_task_state = HSM_ST_LAST;
3426
3427 page = sg[qc->cursg].page;
3428 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3429
3430 /* get the current page and offset */
3431 page = nth_page(page, (offset >> PAGE_SHIFT));
3432 offset %= PAGE_SIZE;
3433
3434 buf = kmap(page) + offset;
3435
3436 qc->cursect++;
3437 qc->cursg_ofs++;
3438
if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
3440 qc->cursg++;
3441 qc->cursg_ofs = 0;
3442 }
3443
3444 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3445
3446 /* do the actual data transfer */
3448 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3449
3450 kunmap(page);
3451 }
3452
3453 /**
3454 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3455 * @qc: Command on going
3456 * @bytes: number of bytes
3457 *
* Transfer data from/to the ATAPI device.
3459 *
3460 * LOCKING:
3461 * Inherited from caller.
3462 *
3463 */
3464
3465 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3466 {
3467 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3468 struct scatterlist *sg = qc->__sg;
3469 struct ata_port *ap = qc->ap;
3470 struct page *page;
3471 unsigned char *buf;
3472 unsigned int offset, count;
3473
3474 if (qc->curbytes + bytes >= qc->nbytes)
3475 ap->hsm_task_state = HSM_ST_LAST;
3476
3477 next_sg:
3478 if (unlikely(qc->cursg >= qc->n_elem)) {
3479 /*
3480 * The end of qc->sg is reached and the device expects
* more data to transfer. In order not to overrun qc->sg
* while still fulfilling the length specified in the byte
* count register:
* - for the read case, discard trailing data from the device
* - for the write case, pad zero data out to the device
3485 */
3486 u16 pad_buf[1] = { 0 };
3487 unsigned int words = bytes >> 1;
3488 unsigned int i;
3489
3490 if (words) /* warning if bytes > 1 */
3491 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3492 ap->id, bytes);
3493
3494 for (i = 0; i < words; i++)
3495 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3496
3497 ap->hsm_task_state = HSM_ST_LAST;
3498 return;
3499 }
3500
3501 sg = &qc->__sg[qc->cursg];
3502
3503 page = sg->page;
3504 offset = sg->offset + qc->cursg_ofs;
3505
3506 /* get the current page and offset */
3507 page = nth_page(page, (offset >> PAGE_SHIFT));
3508 offset %= PAGE_SIZE;
3509
3510 /* don't overrun current sg */
3511 count = min(sg->length - qc->cursg_ofs, bytes);
3512
3513 /* don't cross page boundaries */
3514 count = min(count, (unsigned int)PAGE_SIZE - offset);
3515
3516 buf = kmap(page) + offset;
3517
3518 bytes -= count;
3519 qc->curbytes += count;
3520 qc->cursg_ofs += count;
3521
3522 if (qc->cursg_ofs == sg->length) {
3523 qc->cursg++;
3524 qc->cursg_ofs = 0;
3525 }
3526
3527 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3528
3529 /* do the actual data transfer */
3530 ata_data_xfer(ap, buf, count, do_write);
3531
3532 kunmap(page);
3533
3534 if (bytes)
3535 goto next_sg;
3536 }
3537
3538 /**
3539 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3540 * @qc: Command on going
3541 *
* Transfer data from/to the ATAPI device.
3543 *
3544 * LOCKING:
3545 * Inherited from caller.
3546 */
3547
3548 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3549 {
3550 struct ata_port *ap = qc->ap;
3551 struct ata_device *dev = qc->dev;
3552 unsigned int ireason, bc_lo, bc_hi, bytes;
3553 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3554
3555 ap->ops->tf_read(ap, &qc->tf);
3556 ireason = qc->tf.nsect;
3557 bc_lo = qc->tf.lbam;
3558 bc_hi = qc->tf.lbah;
3559 bytes = (bc_hi << 8) | bc_lo;
3560
3561 /* shall be cleared to zero, indicating xfer of data */
3562 if (ireason & (1 << 0))
3563 goto err_out;
3564
3565 /* make sure transfer direction matches expected */
3566 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3567 if (do_write != i_write)
3568 goto err_out;
3569
3570 __atapi_pio_bytes(qc, bytes);
3571
3572 return;
3573
3574 err_out:
3575 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3576 ap->id, dev->devno);
3577 qc->err_mask |= AC_ERR_HSM;
3578 ap->hsm_task_state = HSM_ST_ERR;
3579 }
3580
3581 /**
3582 * ata_pio_block - start PIO on a block
3583 * @ap: the target ata_port
3584 *
3585 * LOCKING:
3586 * None. (executing in kernel thread context)
3587 */
3588
3589 static void ata_pio_block(struct ata_port *ap)
3590 {
3591 struct ata_queued_cmd *qc;
3592 u8 status;
3593
3594 /*
3595 * This is purely heuristic. This is a fast path.
3596 * Sometimes when we enter, BSY will be cleared in
3597 * a chk-status or two. If not, the drive is probably seeking
3598 * or something. Snooze for a couple msecs, then
3599 * chk-status again. If still busy, fall back to
3600 * HSM_ST_POLL state.
3601 */
3602 status = ata_busy_wait(ap, ATA_BUSY, 5);
3603 if (status & ATA_BUSY) {
3604 msleep(2);
3605 status = ata_busy_wait(ap, ATA_BUSY, 10);
3606 if (status & ATA_BUSY) {
3607 ap->hsm_task_state = HSM_ST_POLL;
3608 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3609 return;
3610 }
3611 }
3612
3613 qc = ata_qc_from_tag(ap, ap->active_tag);
3614 WARN_ON(qc == NULL);
3615
3616 /* check error */
3617 if (status & (ATA_ERR | ATA_DF)) {
3618 qc->err_mask |= AC_ERR_DEV;
3619 ap->hsm_task_state = HSM_ST_ERR;
3620 return;
3621 }
3622
3623 /* transfer data if any */
3624 if (is_atapi_taskfile(&qc->tf)) {
3625 /* DRQ=0 means no more data to transfer */
3626 if ((status & ATA_DRQ) == 0) {
3627 ap->hsm_task_state = HSM_ST_LAST;
3628 return;
3629 }
3630
3631 atapi_pio_bytes(qc);
3632 } else {
3633 /* handle BSY=0, DRQ=0 as error */
3634 if ((status & ATA_DRQ) == 0) {
3635 qc->err_mask |= AC_ERR_HSM;
3636 ap->hsm_task_state = HSM_ST_ERR;
3637 return;
3638 }
3639
3640 ata_pio_sector(qc);
3641 }
3642 }
3643
3644 static void ata_pio_error(struct ata_port *ap)
3645 {
3646 struct ata_queued_cmd *qc;
3647
3648 qc = ata_qc_from_tag(ap, ap->active_tag);
3649 WARN_ON(qc == NULL);
3650
3651 if (qc->tf.command != ATA_CMD_PACKET)
3652 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3653
3654 /* make sure qc->err_mask is available to
3655 * know what's wrong and recover
3656 */
3657 WARN_ON(qc->err_mask == 0);
3658
3659 ap->hsm_task_state = HSM_ST_IDLE;
3660
3661 ata_poll_qc_complete(qc);
3662 }
3663
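/* ata_pio_task - polling-mode PIO state machine
 *
 * Runs from the port's workqueue and loops on hsm_task_state:
 * HSM_ST transfers a data block, HSM_ST_LAST checks for command
 * completion, the *_POLL states wait for BSY to clear (rescheduling
 * the task with a delay), and HSM_ST_TMOUT/HSM_ST_ERR complete the
 * command with an error.
 */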
3664 static void ata_pio_task(void *_data)
3665 {
3666 struct ata_port *ap = _data;
3667 unsigned long timeout;
3668 int qc_completed;
3669
3670 fsm_start:
3671 timeout = 0;
3672 qc_completed = 0;
3673
3674 switch (ap->hsm_task_state) {
3675 case HSM_ST_IDLE:
3676 return;
3677
3678 case HSM_ST:
3679 ata_pio_block(ap);
3680 break;
3681
3682 case HSM_ST_LAST:
3683 qc_completed = ata_pio_complete(ap);
3684 break;
3685
3686 case HSM_ST_POLL:
3687 case HSM_ST_LAST_POLL:
3688 timeout = ata_pio_poll(ap);
3689 break;
3690
3691 case HSM_ST_TMOUT:
3692 case HSM_ST_ERR:
3693 ata_pio_error(ap);
3694 return;
3695 }
3696
3697 if (timeout)
3698 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3699 else if (!qc_completed)
3700 goto fsm_start;
3701 }
3702
3703 /**
3704 * atapi_packet_task - Write CDB bytes to hardware
3705 * @_data: Port to which ATAPI device is attached.
3706 *
3707 * When device has indicated its readiness to accept
3708 * a CDB, this function is called. Send the CDB.
3709 * If DMA is to be performed, exit immediately.
* Otherwise, we are in polling mode, so poll
* status until the operation succeeds or fails.
3712 *
3713 * LOCKING:
3714 * Kernel thread context (may sleep)
3715 */
3716
3717 static void atapi_packet_task(void *_data)
3718 {
3719 struct ata_port *ap = _data;
3720 struct ata_queued_cmd *qc;
3721 u8 status;
3722
3723 qc = ata_qc_from_tag(ap, ap->active_tag);
3724 WARN_ON(qc == NULL);
3725 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3726
3727 /* sleep-wait for BSY to clear */
3728 DPRINTK("busy wait\n");
3729 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3730 qc->err_mask |= AC_ERR_TIMEOUT;
3731 goto err_out;
3732 }
3733
3734 /* make sure DRQ is set */
3735 status = ata_chk_status(ap);
3736 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3737 qc->err_mask |= AC_ERR_HSM;
3738 goto err_out;
3739 }
3740
3741 /* send SCSI cdb */
3742 DPRINTK("send cdb\n");
3743 WARN_ON(qc->dev->cdb_len < 12);
3744
3745 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3746 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3747 unsigned long flags;
3748
3749 /* Once we're done issuing command and kicking bmdma,
3750 * irq handler takes over. To not lose irq, we need
3751 * to clear NOINTR flag before sending cdb, but
3752 * interrupt handler shouldn't be invoked before we're
3753 * finished. Hence, the following locking.
3754 */
3755 spin_lock_irqsave(&ap->host_set->lock, flags);
3756 ap->flags &= ~ATA_FLAG_NOINTR;
3757 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3758 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3759 ap->ops->bmdma_start(qc); /* initiate bmdma */
3760 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3761 } else {
3762 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3763
3764 /* PIO commands are handled by polling */
3765 ap->hsm_task_state = HSM_ST;
3766 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3767 }
3768
3769 return;
3770
3771 err_out:
3772 ata_poll_qc_complete(qc);
3773 }
3774
3775 /**
3776 * ata_qc_timeout - Handle timeout of queued command
3777 * @qc: Command that timed out
3778 *
3779 * Some part of the kernel (currently, only the SCSI layer)
3780 * has noticed that the active command on port @ap has not
3781 * completed after a specified length of time. Handle this
3782 * condition by disabling DMA (if necessary) and completing
3783 * transactions, with error if necessary.
3784 *
3785 * This also handles the case of the "lost interrupt", where
3786 * for some reason (possibly hardware bug, possibly driver bug)
3787 * an interrupt was not delivered to the driver, even though the
3788 * transaction completed successfully.
3789 *
3790 * LOCKING:
3791 * Inherited from SCSI layer (none, can sleep)
3792 */
3793
3794 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3795 {
3796 struct ata_port *ap = qc->ap;
3797 struct ata_host_set *host_set = ap->host_set;
3798 u8 host_stat = 0, drv_stat;
3799 unsigned long flags;
3800
3801 DPRINTK("ENTER\n");
3802
3803 ap->hsm_task_state = HSM_ST_IDLE;
3804
3805 spin_lock_irqsave(&host_set->lock, flags);
3806
3807 switch (qc->tf.protocol) {
3808
3809 case ATA_PROT_DMA:
3810 case ATA_PROT_ATAPI_DMA:
3811 host_stat = ap->ops->bmdma_status(ap);
3812
3813 /* before we do anything else, clear DMA-Start bit */
3814 ap->ops->bmdma_stop(qc);
3815
3816 /* fall through */
3817
3818 default:
3819 ata_altstatus(ap);
3820 drv_stat = ata_chk_status(ap);
3821
3822 /* ack bmdma irq events */
3823 ap->ops->irq_clear(ap);
3824
3825 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3826 ap->id, qc->tf.command, drv_stat, host_stat);
3827
3828 /* complete taskfile transaction */
3829 qc->err_mask |= ac_err_mask(drv_stat);
3830 break;
3831 }
3832
3833 spin_unlock_irqrestore(&host_set->lock, flags);
3834
3835 ata_eh_qc_complete(qc);
3836
3837 DPRINTK("EXIT\n");
3838 }
3839
3840 /**
3841 * ata_eng_timeout - Handle timeout of queued command
3842 * @ap: Port on which timed-out command is active
3843 *
3844 * Some part of the kernel (currently, only the SCSI layer)
3845 * has noticed that the active command on port @ap has not
3846 * completed after a specified length of time. Handle this
3847 * condition by disabling DMA (if necessary) and completing
3848 * transactions, with error if necessary.
3849 *
3850 * This also handles the case of the "lost interrupt", where
3851 * for some reason (possibly hardware bug, possibly driver bug)
3852 * an interrupt was not delivered to the driver, even though the
3853 * transaction completed successfully.
3854 *
3855 * LOCKING:
3856 * Inherited from SCSI layer (none, can sleep)
3857 */
3858
3859 void ata_eng_timeout(struct ata_port *ap)
3860 {
3861 DPRINTK("ENTER\n");
3862
3863 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3864
3865 DPRINTK("EXIT\n");
3866 }
3867
3868 /**
3869 * ata_qc_new - Request an available ATA command, for queueing
* @ap: Port from which we request an available command structure
3872 *
3873 * LOCKING:
3874 * None.
3875 */
3876
3877 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3878 {
3879 struct ata_queued_cmd *qc = NULL;
3880 unsigned int i;
3881
3882 for (i = 0; i < ATA_MAX_QUEUE; i++)
3883 if (!test_and_set_bit(i, &ap->qactive)) {
3884 qc = ata_qc_from_tag(ap, i);
3885 break;
3886 }
3887
3888 if (qc)
3889 qc->tag = i;
3890
3891 return qc;
3892 }
3893
3894 /**
3895 * ata_qc_new_init - Request an available ATA command, and initialize it
3896 * @ap: Port associated with device @dev
* @dev: Device from which we request an available command structure
3898 *
3899 * LOCKING:
3900 * None.
3901 */
3902
3903 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3904 struct ata_device *dev)
3905 {
3906 struct ata_queued_cmd *qc;
3907
3908 qc = ata_qc_new(ap);
3909 if (qc) {
3910 qc->scsicmd = NULL;
3911 qc->ap = ap;
3912 qc->dev = dev;
3913
3914 ata_qc_reinit(qc);
3915 }
3916
3917 return qc;
3918 }
3919
3920 /**
3921 * ata_qc_free - free unused ata_queued_cmd
3922 * @qc: Command to complete
3923 *
3924 * Designed to free unused ata_queued_cmd object
3925 * in case something prevents using it.
3926 *
3927 * LOCKING:
3928 * spin_lock_irqsave(host_set lock)
3929 */
3930 void ata_qc_free(struct ata_queued_cmd *qc)
3931 {
struct ata_port *ap;
unsigned int tag;

WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
ap = qc->ap;
3936
3937 qc->flags = 0;
3938 tag = qc->tag;
3939 if (likely(ata_tag_valid(tag))) {
3940 if (tag == ap->active_tag)
3941 ap->active_tag = ATA_TAG_POISON;
3942 qc->tag = ATA_TAG_POISON;
3943 clear_bit(tag, &ap->qactive);
3944 }
3945 }
3946
3947 void __ata_qc_complete(struct ata_queued_cmd *qc)
3948 {
3949 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3950 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3951
3952 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3953 ata_sg_clean(qc);
3954
3955 /* atapi: mark qc as inactive to prevent the interrupt handler
3956 * from completing the command twice later, before the error handler
3957 * is called. (when rc != 0 and atapi request sense is needed)
3958 */
3959 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3960
3961 /* call completion callback */
3962 qc->complete_fn(qc);
3963 }
3964
3965 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3966 {
3967 struct ata_port *ap = qc->ap;
3968
3969 switch (qc->tf.protocol) {
3970 case ATA_PROT_DMA:
3971 case ATA_PROT_ATAPI_DMA:
3972 return 1;
3973
3974 case ATA_PROT_ATAPI:
3975 case ATA_PROT_PIO:
3976 if (ap->flags & ATA_FLAG_PIO_DMA)
3977 return 1;
3978
3979 /* fall through */
3980
3981 default:
3982 return 0;
3983 }
3984
3985 /* never reached */
3986 }
3987
3988 /**
3989 * ata_qc_issue - issue taskfile to device
3990 * @qc: command to issue to device
3991 *
* Prepare an ATA command for submission to the device.
3993 * This includes mapping the data into a DMA-able
3994 * area, filling in the S/G table, and finally
3995 * writing the taskfile to hardware, starting the command.
3996 *
3997 * LOCKING:
3998 * spin_lock_irqsave(host_set lock)
3999 */
4000 void ata_qc_issue(struct ata_queued_cmd *qc)
4001 {
4002 struct ata_port *ap = qc->ap;
4003
4004 qc->ap->active_tag = qc->tag;
4005 qc->flags |= ATA_QCFLAG_ACTIVE;
4006
4007 if (ata_should_dma_map(qc)) {
4008 if (qc->flags & ATA_QCFLAG_SG) {
4009 if (ata_sg_setup(qc))
4010 goto sg_err;
4011 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4012 if (ata_sg_setup_one(qc))
4013 goto sg_err;
4014 }
4015 } else {
4016 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4017 }
4018
4019 ap->ops->qc_prep(qc);
4020
4021 qc->err_mask |= ap->ops->qc_issue(qc);
4022 if (unlikely(qc->err_mask))
4023 goto err;
4024 return;
4025
4026 sg_err:
4027 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4028 qc->err_mask |= AC_ERR_SYSTEM;
4029 err:
4030 ata_qc_complete(qc);
4031 }
4032
4033 /**
4034 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4035 * @qc: command to issue to device
4036 *
4037 * Using various libata functions and hooks, this function
4038 * starts an ATA command. ATA commands are grouped into
4039 * classes called "protocols", and issuing each type of protocol
4040 * is slightly different.
4041 *
4042 * May be used as the qc_issue() entry in ata_port_operations.
4043 *
4044 * LOCKING:
4045 * spin_lock_irqsave(host_set lock)
4046 *
4047 * RETURNS:
4048 * Zero on success, AC_ERR_* mask on failure
4049 */
4050
4051 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4052 {
4053 struct ata_port *ap = qc->ap;
4054
4055 ata_dev_select(ap, qc->dev->devno, 1, 0);
4056
4057 switch (qc->tf.protocol) {
4058 case ATA_PROT_NODATA:
4059 ata_tf_to_host(ap, &qc->tf);
4060 break;
4061
4062 case ATA_PROT_DMA:
4063 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4064 ap->ops->bmdma_setup(qc); /* set up bmdma */
4065 ap->ops->bmdma_start(qc); /* initiate bmdma */
4066 break;
4067
4068 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4069 ata_qc_set_polling(qc);
4070 ata_tf_to_host(ap, &qc->tf);
4071 ap->hsm_task_state = HSM_ST;
4072 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4073 break;
4074
4075 case ATA_PROT_ATAPI:
4076 ata_qc_set_polling(qc);
4077 ata_tf_to_host(ap, &qc->tf);
4078 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4079 break;
4080
4081 case ATA_PROT_ATAPI_NODATA:
4082 ap->flags |= ATA_FLAG_NOINTR;
4083 ata_tf_to_host(ap, &qc->tf);
4084 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4085 break;
4086
4087 case ATA_PROT_ATAPI_DMA:
4088 ap->flags |= ATA_FLAG_NOINTR;
4089 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4090 ap->ops->bmdma_setup(qc); /* set up bmdma */
4091 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4092 break;
4093
4094 default:
4095 WARN_ON(1);
4096 return AC_ERR_SYSTEM;
4097 }
4098
4099 return 0;
4100 }
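
/*
 * Illustrative sketch (not part of the original file): a low-level
 * driver with no protocol-specific issue logic can plug the helper
 * above straight into its port operations.  The structure below is
 * hypothetical; only hooks exported by this file are shown.
 */
#if 0
static const struct ata_port_operations example_port_ops = {
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
};
#endif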
4101
4102 /**
4103 * ata_host_intr - Handle host interrupt for given (port, task)
4104 * @ap: Port on which interrupt arrived (possibly...)
4105 * @qc: Taskfile currently active in engine
4106 *
4107 * Handle host interrupt for given queued command. Currently,
4108 * only DMA interrupts are handled. All other commands are
4109 * handled via polling with interrupts disabled (nIEN bit).
4110 *
4111 * LOCKING:
4112 * spin_lock_irqsave(host_set lock)
4113 *
4114 * RETURNS:
4115 * One if interrupt was handled, zero if not (shared irq).
4116 */
4117
4118 inline unsigned int ata_host_intr (struct ata_port *ap,
4119 struct ata_queued_cmd *qc)
4120 {
4121 u8 status, host_stat;
4122
4123 switch (qc->tf.protocol) {
4124
4125 case ATA_PROT_DMA:
4126 case ATA_PROT_ATAPI_DMA:
4127 case ATA_PROT_ATAPI:
4128 /* check status of DMA engine */
4129 host_stat = ap->ops->bmdma_status(ap);
4130 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4131
4132 /* if it's not our irq... */
4133 if (!(host_stat & ATA_DMA_INTR))
4134 goto idle_irq;
4135
4136 /* before we do anything else, clear DMA-Start bit */
4137 ap->ops->bmdma_stop(qc);
4138
4139 /* fall through */
4140
4141 case ATA_PROT_ATAPI_NODATA:
4142 case ATA_PROT_NODATA:
4143 /* check altstatus */
4144 status = ata_altstatus(ap);
4145 if (status & ATA_BUSY)
4146 goto idle_irq;
4147
4148 /* check main status, clearing INTRQ */
4149 status = ata_chk_status(ap);
4150 if (unlikely(status & ATA_BUSY))
4151 goto idle_irq;
4152 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4153 ap->id, qc->tf.protocol, status);
4154
4155 /* ack bmdma irq events */
4156 ap->ops->irq_clear(ap);
4157
4158 /* complete taskfile transaction */
4159 qc->err_mask |= ac_err_mask(status);
4160 ata_qc_complete(qc);
4161 break;
4162
4163 default:
4164 goto idle_irq;
4165 }
4166
4167 return 1; /* irq handled */
4168
4169 idle_irq:
4170 ap->stats.idle_irq++;
4171
4172 #ifdef ATA_IRQ_TRAP
4173 if ((ap->stats.idle_irq % 1000) == 0) {
4174 ata_irq_ack(ap, 0); /* debug trap */
4175 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4176 return 1;
4177 }
4178 #endif
4179 return 0; /* irq not handled */
4180 }
4181
4182 /**
4183 * ata_interrupt - Default ATA host interrupt handler
4184 * @irq: irq line (unused)
4185 * @dev_instance: pointer to our ata_host_set information structure
4186 * @regs: unused
4187 *
4188 * Default interrupt handler for PCI IDE devices. Calls
4189 * ata_host_intr() for each port that is not disabled.
4190 *
4191 * LOCKING:
4192 * Obtains host_set lock during operation.
4193 *
4194 * RETURNS:
4195 * IRQ_NONE or IRQ_HANDLED.
4196 */
4197
4198 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4199 {
4200 struct ata_host_set *host_set = dev_instance;
4201 unsigned int i;
4202 unsigned int handled = 0;
4203 unsigned long flags;
4204
4205 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4206 spin_lock_irqsave(&host_set->lock, flags);
4207
4208 for (i = 0; i < host_set->n_ports; i++) {
4209 struct ata_port *ap;
4210
4211 ap = host_set->ports[i];
4212 if (ap &&
4213 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4214 struct ata_queued_cmd *qc;
4215
4216 qc = ata_qc_from_tag(ap, ap->active_tag);
4217 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4218 (qc->flags & ATA_QCFLAG_ACTIVE))
4219 handled |= ata_host_intr(ap, qc);
4220 }
4221 }
4222
4223 spin_unlock_irqrestore(&host_set->lock, flags);
4224
4225 return IRQ_RETVAL(handled);
4226 }
4227
4228
4229 /*
4230 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4231 * without filling in any other registers.
4232 */
4233 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4234 u8 cmd)
4235 {
4236 struct ata_taskfile tf;
4237 int err;
4238
4239 ata_tf_init(ap, &tf, dev->devno);
4240
4241 tf.command = cmd;
4242 tf.flags |= ATA_TFLAG_DEVICE;
4243 tf.protocol = ATA_PROT_NODATA;
4244
4245 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4246 if (err)
4247 printk(KERN_ERR "%s: ata command failed: %d\n",
4248 __FUNCTION__, err);
4249
4250 return err;
4251 }
4252
4253 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4254 {
4255 u8 cmd;
4256
4257 if (!ata_try_flush_cache(dev))
4258 return 0;
4259
4260 if (ata_id_has_flush_ext(dev->id))
4261 cmd = ATA_CMD_FLUSH_EXT;
4262 else
4263 cmd = ATA_CMD_FLUSH;
4264
4265 return ata_do_simple_cmd(ap, dev, cmd);
4266 }
4267
4268 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4269 {
4270 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4271 }
4272
4273 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4274 {
4275 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4276 }
4277
4278 /**
4279 * ata_device_resume - wake up a previously suspended device
4280 * @ap: port the device is connected to
4281 * @dev: the device to resume
4282 *
4283 * Kick the drive back into action by sending it an idle immediate
4284 * command and making sure its transfer mode matches between drive
4285 * and host.
4286 *
4287 */
4288 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4289 {
4290 if (ap->flags & ATA_FLAG_SUSPENDED) {
4291 ap->flags &= ~ATA_FLAG_SUSPENDED;
4292 ata_set_mode(ap);
4293 }
4294 if (!ata_dev_present(dev))
4295 return 0;
4296 if (dev->class == ATA_DEV_ATA)
4297 ata_start_drive(ap, dev);
4298
4299 return 0;
4300 }
4301
4302 /**
4303 * ata_device_suspend - prepare a device for suspend
4304 * @ap: port the device is connected to
4305 * @dev: the device to suspend
 * @state: target power management state
4306 *
4307 * Flush the cache on the drive, if appropriate, then issue a
4308 * standbynow command.
4309 */
4310 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4311 {
4312 if (!ata_dev_present(dev))
4313 return 0;
4314 if (dev->class == ATA_DEV_ATA)
4315 ata_flush_cache(ap, dev);
4316
4317 if (state.event != PM_EVENT_FREEZE)
4318 ata_standby_drive(ap, dev);
4319 ap->flags |= ATA_FLAG_SUSPENDED;
4320 return 0;
4321 }
4322
4323 /**
4324 * ata_port_start - Set port up for dma.
4325 * @ap: Port to initialize
4326 *
4327 * Called just after data structures for each port are
4328 * initialized. Allocates space for PRD table.
4329 *
4330 * May be used as the port_start() entry in ata_port_operations.
4331 *
4332 * LOCKING:
4333 * Inherited from caller.
4334 */
4335
4336 int ata_port_start (struct ata_port *ap)
4337 {
4338 struct device *dev = ap->dev;
4339 int rc;
4340
4341 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4342 if (!ap->prd)
4343 return -ENOMEM;
4344
4345 rc = ata_pad_alloc(ap, dev);
4346 if (rc) {
4347 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4348 return rc;
4349 }
4350
4351 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4352
4353 return 0;
4354 }
4355
4356
4357 /**
4358 * ata_port_stop - Undo ata_port_start()
4359 * @ap: Port to shut down
4360 *
4361 * Frees the PRD table.
4362 *
4363 * May be used as the port_stop() entry in ata_port_operations.
4364 *
4365 * LOCKING:
4366 * Inherited from caller.
4367 */
4368
4369 void ata_port_stop (struct ata_port *ap)
4370 {
4371 struct device *dev = ap->dev;
4372
4373 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4374 ata_pad_free(ap, dev);
4375 }
4376
4377 void ata_host_stop (struct ata_host_set *host_set)
4378 {
4379 if (host_set->mmio_base)
4380 iounmap(host_set->mmio_base);
4381 }
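
/*
 * Illustrative sketch (not part of the original file): drivers with no
 * special DMA buffer requirements typically reuse the three helpers
 * above verbatim in their port operations:
 *
 *	.port_start	= ata_port_start,
 *	.port_stop	= ata_port_stop,
 *	.host_stop	= ata_host_stop,
 */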
4382
4383
4384 /**
4385 * ata_host_remove - Unregister SCSI host structure with upper layers
4386 * @ap: Port to unregister
4387 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4388 *
4389 * LOCKING:
4390 * Inherited from caller.
4391 */
4392
4393 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4394 {
4395 struct Scsi_Host *sh = ap->host;
4396
4397 DPRINTK("ENTER\n");
4398
4399 if (do_unregister)
4400 scsi_remove_host(sh);
4401
4402 ap->ops->port_stop(ap);
4403 }
4404
4405 /**
4406 * ata_host_init - Initialize an ata_port structure
4407 * @ap: Structure to initialize
4408 * @host: associated SCSI mid-layer structure
4409 * @host_set: Collection of hosts to which @ap belongs
4410 * @ent: Probe information provided by low-level driver
4411 * @port_no: Port number associated with this ata_port
4412 *
4413 * Initialize a new ata_port structure, and its associated
4414 * scsi_host.
4415 *
4416 * LOCKING:
4417 * Inherited from caller.
4418 */
4419
4420 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4421 struct ata_host_set *host_set,
4422 const struct ata_probe_ent *ent, unsigned int port_no)
4423 {
4424 unsigned int i;
4425
4426 host->max_id = 16;
4427 host->max_lun = 1;
4428 host->max_channel = 1;
4429 host->unique_id = ata_unique_id++;
4430 host->max_cmd_len = 12;
4431
4432 ap->flags = ATA_FLAG_PORT_DISABLED;
4433 ap->id = host->unique_id;
4434 ap->host = host;
4435 ap->ctl = ATA_DEVCTL_OBS;
4436 ap->host_set = host_set;
4437 ap->dev = ent->dev;
4438 ap->port_no = port_no;
4439 ap->hard_port_no =
4440 ent->legacy_mode ? ent->hard_port_no : port_no;
4441 ap->pio_mask = ent->pio_mask;
4442 ap->mwdma_mask = ent->mwdma_mask;
4443 ap->udma_mask = ent->udma_mask;
4444 ap->flags |= ent->host_flags;
4445 ap->ops = ent->port_ops;
4446 ap->cbl = ATA_CBL_NONE;
4447 ap->active_tag = ATA_TAG_POISON;
4448 ap->last_ctl = 0xFF;
4449
4450 INIT_WORK(&ap->port_task, NULL, NULL);
4451 INIT_LIST_HEAD(&ap->eh_done_q);
4452
4453 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4454 struct ata_device *dev = &ap->device[i];
4455 dev->devno = i;
4456 dev->pio_mask = UINT_MAX;
4457 dev->mwdma_mask = UINT_MAX;
4458 dev->udma_mask = UINT_MAX;
4459 }
4460
4461 #ifdef ATA_IRQ_TRAP
4462 ap->stats.unhandled_irq = 1;
4463 ap->stats.idle_irq = 1;
4464 #endif
4465
4466 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4467 }
4468
4469 /**
4470 * ata_host_add - Attach low-level ATA driver to system
4471 * @ent: Information provided by low-level driver
4472 * @host_set: Collections of ports to which we add
4473 * @port_no: Port number associated with this host
4474 *
4475 * Attach low-level ATA driver to system.
4476 *
4477 * LOCKING:
4478 * PCI/etc. bus probe sem.
4479 *
4480 * RETURNS:
4481 * New ata_port on success, NULL on error.
4482 */
4483
4484 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4485 struct ata_host_set *host_set,
4486 unsigned int port_no)
4487 {
4488 struct Scsi_Host *host;
4489 struct ata_port *ap;
4490 int rc;
4491
4492 DPRINTK("ENTER\n");
4493
4494 if (!ent->port_ops->probe_reset &&
4495 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4496 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4497 port_no);
4498 return NULL;
4499 }
4500
4501 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4502 if (!host)
4503 return NULL;
4504
4505 host->transportt = &ata_scsi_transport_template;
4506
4507 ap = (struct ata_port *) &host->hostdata[0];
4508
4509 ata_host_init(ap, host, host_set, ent, port_no);
4510
4511 rc = ap->ops->port_start(ap);
4512 if (rc)
4513 goto err_out;
4514
4515 return ap;
4516
4517 err_out:
4518 scsi_host_put(host);
4519 return NULL;
4520 }
4521
4522 /**
4523 * ata_device_add - Register hardware device with ATA and SCSI layers
4524 * @ent: Probe information describing hardware device to be registered
4525 *
4526 * This function processes the information provided in the probe
4527 * information struct @ent, allocates the necessary ATA and SCSI
4528 * host information structures, initializes them, and registers
4529 * everything with requisite kernel subsystems.
4530 *
4531 * This function requests irqs, probes the ATA bus, and probes
4532 * the SCSI bus.
4533 *
4534 * LOCKING:
4535 * PCI/etc. bus probe sem.
4536 *
4537 * RETURNS:
4538 * Number of ports registered. Zero on error (no ports registered).
4539 */
4540
4541 int ata_device_add(const struct ata_probe_ent *ent)
4542 {
4543 unsigned int count = 0, i;
4544 struct device *dev = ent->dev;
4545 struct ata_host_set *host_set;
4546
4547 DPRINTK("ENTER\n");
4548 /* alloc a container for our list of ATA ports (buses) */
4549 host_set = kzalloc(sizeof(struct ata_host_set) +
4550 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4551 if (!host_set)
4552 return 0;
4553 spin_lock_init(&host_set->lock);
4554
4555 host_set->dev = dev;
4556 host_set->n_ports = ent->n_ports;
4557 host_set->irq = ent->irq;
4558 host_set->mmio_base = ent->mmio_base;
4559 host_set->private_data = ent->private_data;
4560 host_set->ops = ent->port_ops;
4561 host_set->flags = ent->host_set_flags;
4562
4563 /* register each port bound to this device */
4564 for (i = 0; i < ent->n_ports; i++) {
4565 struct ata_port *ap;
4566 unsigned long xfer_mode_mask;
4567
4568 ap = ata_host_add(ent, host_set, i);
4569 if (!ap)
4570 goto err_out;
4571
4572 host_set->ports[i] = ap;
4573 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4574 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4575 (ap->pio_mask << ATA_SHIFT_PIO);
4576
4577 /* print per-port info to dmesg */
4578 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4579 "bmdma 0x%lX irq %lu\n",
4580 ap->id,
4581 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4582 ata_mode_string(xfer_mode_mask),
4583 ap->ioaddr.cmd_addr,
4584 ap->ioaddr.ctl_addr,
4585 ap->ioaddr.bmdma_addr,
4586 ent->irq);
4587
4588 ata_chk_status(ap);
4589 host_set->ops->irq_clear(ap);
4590 count++;
4591 }
4592
4593 if (!count)
4594 goto err_free_ret;
4595
4596 /* obtain irq, which is shared between channels */
4597 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4598 DRV_NAME, host_set))
4599 goto err_out;
4600
4601 /* perform each probe synchronously */
4602 DPRINTK("probe begin\n");
4603 for (i = 0; i < count; i++) {
4604 struct ata_port *ap;
4605 int rc;
4606
4607 ap = host_set->ports[i];
4608
4609 DPRINTK("ata%u: bus probe begin\n", ap->id);
4610 rc = ata_bus_probe(ap);
4611 DPRINTK("ata%u: bus probe end\n", ap->id);
4612
4613 if (rc) {
4614 /* FIXME: do something useful here?
4615 * Current libata behavior will
4616 * tear down everything when
4617 * the module is removed
4618 * or the h/w is unplugged.
4619 */
4620 }
4621
4622 rc = scsi_add_host(ap->host, dev);
4623 if (rc) {
4624 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4625 ap->id);
4626 /* FIXME: do something useful here */
4627 /* FIXME: handle unconditional calls to
4628 * scsi_scan_host and ata_host_remove, below,
4629 * at the very least
4630 */
4631 }
4632 }
4633
4634 /* probes are done, now scan each port's disk(s) */
4635 DPRINTK("host probe begin\n");
4636 for (i = 0; i < count; i++) {
4637 struct ata_port *ap = host_set->ports[i];
4638
4639 ata_scsi_scan_host(ap);
4640 }
4641
4642 dev_set_drvdata(dev, host_set);
4643
4644 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4645 return ent->n_ports; /* success */
4646
4647 err_out:
4648 for (i = 0; i < count; i++) {
4649 ata_host_remove(host_set->ports[i], 1);
4650 scsi_host_put(host_set->ports[i]->host);
4651 }
4652 err_free_ret:
4653 kfree(host_set);
4654 VPRINTK("EXIT, returning 0\n");
4655 return 0;
4656 }
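
/*
 * Illustrative sketch (not part of the original file): callers such as
 * ata_pci_init_one() build an ata_probe_ent and treat a zero return
 * from ata_device_add() (no ports registered) as probe failure:
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 */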
4657
4658 /**
4659 * ata_host_set_remove - PCI layer callback for device removal
4660 * @host_set: ATA host set that was removed
4661 *
4662 * Unregister all objects associated with this host set. Free those
4663 * objects.
4664 *
4665 * LOCKING:
4666 * Inherited from calling layer (may sleep).
4667 */
4668
4669 void ata_host_set_remove(struct ata_host_set *host_set)
4670 {
4671 struct ata_port *ap;
4672 unsigned int i;
4673
4674 for (i = 0; i < host_set->n_ports; i++) {
4675 ap = host_set->ports[i];
4676 scsi_remove_host(ap->host);
4677 }
4678
4679 free_irq(host_set->irq, host_set);
4680
4681 for (i = 0; i < host_set->n_ports; i++) {
4682 ap = host_set->ports[i];
4683
4684 ata_scsi_release(ap->host);
4685
4686 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4687 struct ata_ioports *ioaddr = &ap->ioaddr;
4688
4689 if (ioaddr->cmd_addr == 0x1f0)
4690 release_region(0x1f0, 8);
4691 else if (ioaddr->cmd_addr == 0x170)
4692 release_region(0x170, 8);
4693 }
4694
4695 scsi_host_put(ap->host);
4696 }
4697
4698 if (host_set->ops->host_stop)
4699 host_set->ops->host_stop(host_set);
4700
4701 kfree(host_set);
4702 }
4703
4704 /**
4705 * ata_scsi_release - SCSI layer callback hook for host unload
4706 * @host: libata host to be unloaded
4707 *
4708 * Performs all duties necessary to shut down a libata port...
4709 * Kill port kthread, disable port, and release resources.
4710 *
4711 * LOCKING:
4712 * Inherited from SCSI layer.
4713 *
4714 * RETURNS:
4715 * One.
4716 */
4717
4718 int ata_scsi_release(struct Scsi_Host *host)
4719 {
4720 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4721 int i;
4722
4723 DPRINTK("ENTER\n");
4724
4725 ap->ops->port_disable(ap);
4726 ata_host_remove(ap, 0);
4727 for (i = 0; i < ATA_MAX_DEVICES; i++)
4728 kfree(ap->device[i].id);
4729
4730 DPRINTK("EXIT\n");
4731 return 1;
4732 }
4733
4734 /**
4735 * ata_std_ports - initialize ioaddr with standard port offsets.
4736 * @ioaddr: IO address structure to be initialized
4737 *
4738 * Utility function which initializes data_addr, error_addr,
4739 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4740 * device_addr, status_addr, and command_addr to standard offsets
4741 * relative to cmd_addr.
4742 *
4743 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4744 */
4745
4746 void ata_std_ports(struct ata_ioports *ioaddr)
4747 {
4748 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4749 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4750 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4751 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4752 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4753 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4754 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4755 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4756 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4757 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4758 }
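
/*
 * Illustrative sketch (not part of the original file): a legacy-mode
 * caller fills in the command and control base addresses itself and
 * lets ata_std_ports() derive the rest.  0x1f0/0x3f6 are the standard
 * primary-channel bases; "probe_ent" is a hypothetical name here.
 */
#if 0
	probe_ent->port[0].cmd_addr = 0x1f0;
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr = 0x3f6;
	ata_std_ports(&probe_ent->port[0]);
#endif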
4759
4760
4761 #ifdef CONFIG_PCI
4762
4763 void ata_pci_host_stop (struct ata_host_set *host_set)
4764 {
4765 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4766
4767 pci_iounmap(pdev, host_set->mmio_base);
4768 }
4769
4770 /**
4771 * ata_pci_remove_one - PCI layer callback for device removal
4772 * @pdev: PCI device that was removed
4773 *
4774 * PCI layer indicates to libata via this hook that a
4775 * hot-unplug or module unload event has occurred.
4776 * Handle this by unregistering all objects associated
4777 * with this PCI device. Free those objects. Then finally
4778 * release PCI resources and disable device.
4779 *
4780 * LOCKING:
4781 * Inherited from PCI layer (may sleep).
4782 */
4783
4784 void ata_pci_remove_one (struct pci_dev *pdev)
4785 {
4786 struct device *dev = pci_dev_to_dev(pdev);
4787 struct ata_host_set *host_set = dev_get_drvdata(dev);
4788
4789 ata_host_set_remove(host_set);
4790 pci_release_regions(pdev);
4791 pci_disable_device(pdev);
4792 dev_set_drvdata(dev, NULL);
4793 }
4794
4795 /* move to PCI subsystem */
4796 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4797 {
4798 unsigned long tmp = 0;
4799
4800 switch (bits->width) {
4801 case 1: {
4802 u8 tmp8 = 0;
4803 pci_read_config_byte(pdev, bits->reg, &tmp8);
4804 tmp = tmp8;
4805 break;
4806 }
4807 case 2: {
4808 u16 tmp16 = 0;
4809 pci_read_config_word(pdev, bits->reg, &tmp16);
4810 tmp = tmp16;
4811 break;
4812 }
4813 case 4: {
4814 u32 tmp32 = 0;
4815 pci_read_config_dword(pdev, bits->reg, &tmp32);
4816 tmp = tmp32;
4817 break;
4818 }
4819
4820 default:
4821 return -EINVAL;
4822 }
4823
4824 tmp &= bits->mask;
4825
4826 return (tmp == bits->val) ? 1 : 0;
4827 }
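
/*
 * Illustrative sketch (not part of the original file): callers describe
 * the config-space test declaratively.  The register values below are
 * modeled on the PIIX "IDE decode enable" check and are hypothetical
 * in this context.
 */
#if 0
static const struct pci_bits example_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
};

	if (!pci_test_config_bits(pdev, &example_enable_bits[port_no]))
		return;	/* port disabled or absent */
#endif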
4828
4829 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4830 {
4831 pci_save_state(pdev);
4832 pci_disable_device(pdev);
4833 pci_set_power_state(pdev, PCI_D3hot);
4834 return 0;
4835 }
4836
4837 int ata_pci_device_resume(struct pci_dev *pdev)
4838 {
4839 pci_set_power_state(pdev, PCI_D0);
4840 pci_restore_state(pdev);
4841 pci_enable_device(pdev);
4842 pci_set_master(pdev);
4843 return 0;
4844 }
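
/*
 * Illustrative sketch (not part of the original file): a PCI low-level
 * driver exposes the two helpers above through its (hypothetical)
 * pci_driver structure:
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= example_pci_tbl,
 *		.probe		= example_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */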
4845 #endif /* CONFIG_PCI */
4846
4847
4848 static int __init ata_init(void)
4849 {
4850 ata_wq = create_workqueue("ata");
4851 if (!ata_wq)
4852 return -ENOMEM;
4853
4854 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4855 return 0;
4856 }
4857
4858 static void __exit ata_exit(void)
4859 {
4860 destroy_workqueue(ata_wq);
4861 }
4862
4863 module_init(ata_init);
4864 module_exit(ata_exit);
4865
4866 static unsigned long ratelimit_time;
4867 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4868
4869 int ata_ratelimit(void)
4870 {
4871 int rc;
4872 unsigned long flags;
4873
4874 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4875
4876 if (time_after(jiffies, ratelimit_time)) {
4877 rc = 1;
4878 ratelimit_time = jiffies + (HZ/5);
4879 } else
4880 rc = 0;
4881
4882 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4883
4884 return rc;
4885 }
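
/*
 * Illustrative sketch (not part of the original file): interrupt-time
 * paths that can fire in bursts gate their messages through
 * ata_ratelimit(), for example:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */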
4886
4887 /*
4888 * libata is essentially a library of internal helper functions for
4889 * low-level ATA host controller drivers. As such, the API/ABI is
4890 * likely to change as new drivers are added and updated.
4891 * Do not depend on ABI/API stability.
4892 */
4893
4894 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4895 EXPORT_SYMBOL_GPL(ata_std_ports);
4896 EXPORT_SYMBOL_GPL(ata_device_add);
4897 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4898 EXPORT_SYMBOL_GPL(ata_sg_init);
4899 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4900 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4901 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4902 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4903 EXPORT_SYMBOL_GPL(ata_tf_load);
4904 EXPORT_SYMBOL_GPL(ata_tf_read);
4905 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4906 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4907 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4908 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4909 EXPORT_SYMBOL_GPL(ata_check_status);
4910 EXPORT_SYMBOL_GPL(ata_altstatus);
4911 EXPORT_SYMBOL_GPL(ata_exec_command);
4912 EXPORT_SYMBOL_GPL(ata_port_start);
4913 EXPORT_SYMBOL_GPL(ata_port_stop);
4914 EXPORT_SYMBOL_GPL(ata_host_stop);
4915 EXPORT_SYMBOL_GPL(ata_interrupt);
4916 EXPORT_SYMBOL_GPL(ata_qc_prep);
4917 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4918 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4919 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4920 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4921 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4922 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4923 EXPORT_SYMBOL_GPL(ata_port_probe);
4924 EXPORT_SYMBOL_GPL(sata_phy_reset);
4925 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4926 EXPORT_SYMBOL_GPL(ata_bus_reset);
4927 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4928 EXPORT_SYMBOL_GPL(ata_std_softreset);
4929 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4930 EXPORT_SYMBOL_GPL(ata_std_postreset);
4931 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4932 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4933 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4934 EXPORT_SYMBOL_GPL(ata_dev_classify);
4935 EXPORT_SYMBOL_GPL(ata_dev_pair);
4936 EXPORT_SYMBOL_GPL(ata_port_disable);
4937 EXPORT_SYMBOL_GPL(ata_ratelimit);
4938 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4939 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4940 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4941 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4942 EXPORT_SYMBOL_GPL(ata_scsi_error);
4943 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4944 EXPORT_SYMBOL_GPL(ata_scsi_release);
4945 EXPORT_SYMBOL_GPL(ata_host_intr);
4946 EXPORT_SYMBOL_GPL(ata_id_string);
4947 EXPORT_SYMBOL_GPL(ata_id_c_string);
4948 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4949 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4950 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4951
4952 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4953 EXPORT_SYMBOL_GPL(ata_timing_compute);
4954 EXPORT_SYMBOL_GPL(ata_timing_merge);
4955
4956 #ifdef CONFIG_PCI
4957 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4958 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4959 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4960 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4961 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4962 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4963 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4964 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4965 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4966 #endif /* CONFIG_PCI */
4967
4968 EXPORT_SYMBOL_GPL(ata_device_suspend);
4969 EXPORT_SYMBOL_GPL(ata_device_resume);
4970 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4971 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);