1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_device *dev,
65 u16 heads, u16 sectors);
66 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
67 static void ata_dev_xfermask(struct ata_device *dev);
68
69 static unsigned int ata_unique_id = 1;
70 static struct workqueue_struct *ata_wq;
71
72 int atapi_enabled = 1;
73 module_param(atapi_enabled, int, 0444);
74 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
75
76 int atapi_dmadir = 0;
77 module_param(atapi_dmadir, int, 0444);
78 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
79
80 int libata_fua = 0;
81 module_param_named(fua, libata_fua, int, 0444);
82 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
83
84 MODULE_AUTHOR("Jeff Garzik");
85 MODULE_DESCRIPTION("Library module for ATA devices");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(DRV_VERSION);
88
89
90 /**
91 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
92 * @tf: Taskfile to convert
93 * @fis: Buffer into which data will be output
94 * @pmp: Port multiplier port
95 *
96 * Converts a standard ATA taskfile to a Serial ATA
97 * FIS structure (Register - Host to Device).
98 *
99 * LOCKING:
100 * Inherited from caller.
101 */
102
103 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
104 {
105 fis[0] = 0x27; /* Register - Host to Device FIS */
106 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
107 bit 7 indicates Command FIS */
108 fis[2] = tf->command;
109 fis[3] = tf->feature;
110
111 fis[4] = tf->lbal;
112 fis[5] = tf->lbam;
113 fis[6] = tf->lbah;
114 fis[7] = tf->device;
115
116 fis[8] = tf->hob_lbal;
117 fis[9] = tf->hob_lbam;
118 fis[10] = tf->hob_lbah;
119 fis[11] = tf->hob_feature;
120
121 fis[12] = tf->nsect;
122 fis[13] = tf->hob_nsect;
123 fis[14] = 0;
124 fis[15] = tf->ctl;
125
126 fis[16] = 0;
127 fis[17] = 0;
128 fis[18] = 0;
129 fis[19] = 0;
130 }
131
132 /**
133 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
134 * @fis: Buffer from which data will be input
135 * @tf: Taskfile to output
136 *
137 * Converts a serial ATA FIS structure to a standard ATA taskfile.
138 *
139 * LOCKING:
140 * Inherited from caller.
141 */
142
143 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
144 {
145 tf->command = fis[2]; /* status */
146 tf->feature = fis[3]; /* error */
147
148 tf->lbal = fis[4];
149 tf->lbam = fis[5];
150 tf->lbah = fis[6];
151 tf->device = fis[7];
152
153 tf->hob_lbal = fis[8];
154 tf->hob_lbam = fis[9];
155 tf->hob_lbah = fis[10];
156
157 tf->nsect = fis[12];
158 tf->hob_nsect = fis[13];
159 }
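
/*
 * Illustrative sketch (not in the original source): a caller that uses the
 * two helpers above to build a Host-to-Device command FIS and to parse a
 * received Device-to-Host FIS back into a taskfile.  The 20-byte buffer
 * and PMP value 0 are assumptions made for the example.
 */
static inline void ata_example_fis_usage(const struct ata_taskfile *cmd_tf,
					 const u8 *d2h_fis,
					 struct ata_taskfile *result_tf)
{
	u8 h2d_fis[20];

	ata_tf_to_fis(cmd_tf, h2d_fis, 0);	/* taskfile -> wire format */
	ata_tf_from_fis(d2h_fis, result_tf);	/* wire format -> taskfile */
}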
160
161 static const u8 ata_rw_cmds[] = {
162 /* pio multi */
163 ATA_CMD_READ_MULTI,
164 ATA_CMD_WRITE_MULTI,
165 ATA_CMD_READ_MULTI_EXT,
166 ATA_CMD_WRITE_MULTI_EXT,
167 0,
168 0,
169 0,
170 ATA_CMD_WRITE_MULTI_FUA_EXT,
171 /* pio */
172 ATA_CMD_PIO_READ,
173 ATA_CMD_PIO_WRITE,
174 ATA_CMD_PIO_READ_EXT,
175 ATA_CMD_PIO_WRITE_EXT,
176 0,
177 0,
178 0,
179 0,
180 /* dma */
181 ATA_CMD_READ,
182 ATA_CMD_WRITE,
183 ATA_CMD_READ_EXT,
184 ATA_CMD_WRITE_EXT,
185 0,
186 0,
187 0,
188 ATA_CMD_WRITE_FUA_EXT
189 };
190
191 /**
192 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
193 * @qc: command to examine and configure
194 *
195 * Examine the device configuration and tf->flags to calculate
196 * the proper read/write commands and protocol to use.
197 *
198 * LOCKING:
199 * caller.
200 */
201 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
202 {
203 struct ata_taskfile *tf = &qc->tf;
204 struct ata_device *dev = qc->dev;
205 u8 cmd;
206
207 int index, fua, lba48, write;
208
209 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
210 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
211 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
212
213 if (dev->flags & ATA_DFLAG_PIO) {
214 tf->protocol = ATA_PROT_PIO;
215 index = dev->multi_count ? 0 : 8;
216 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
217 /* Unable to use DMA due to host limitation */
218 tf->protocol = ATA_PROT_PIO;
219 index = dev->multi_count ? 0 : 8;
220 } else {
221 tf->protocol = ATA_PROT_DMA;
222 index = 16;
223 }
224
225 cmd = ata_rw_cmds[index + fua + lba48 + write];
226 if (cmd) {
227 tf->command = cmd;
228 return 0;
229 }
230 return -1;
231 }
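
/*
 * Worked example (added for clarity): for a DMA write to an LBA48 device
 * with FUA set, index = 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23,
 * which selects ATA_CMD_WRITE_FUA_EXT from ata_rw_cmds[] above.
 */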
232
233 /**
234 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
235 * @pio_mask: pio_mask
236 * @mwdma_mask: mwdma_mask
237 * @udma_mask: udma_mask
238 *
239 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
240 * unsigned int xfer_mask.
241 *
242 * LOCKING:
243 * None.
244 *
245 * RETURNS:
246 * Packed xfer_mask.
247 */
248 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
249 unsigned int mwdma_mask,
250 unsigned int udma_mask)
251 {
252 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
253 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
254 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
255 }
256
257 /**
258 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
259 * @xfer_mask: xfer_mask to unpack
260 * @pio_mask: resulting pio_mask
261 * @mwdma_mask: resulting mwdma_mask
262 * @udma_mask: resulting udma_mask
263 *
264 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
265 * Any NULL destination masks will be ignored.
266 */
267 static void ata_unpack_xfermask(unsigned int xfer_mask,
268 unsigned int *pio_mask,
269 unsigned int *mwdma_mask,
270 unsigned int *udma_mask)
271 {
272 if (pio_mask)
273 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
274 if (mwdma_mask)
275 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
276 if (udma_mask)
277 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
278 }
279
280 static const struct ata_xfer_ent {
281 int shift, bits;
282 u8 base;
283 } ata_xfer_tbl[] = {
284 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
285 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
286 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
287 { -1, },
288 };
289
290 /**
291 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
292 * @xfer_mask: xfer_mask of interest
293 *
294 * Return matching XFER_* value for @xfer_mask. Only the highest
295 * bit of @xfer_mask is considered.
296 *
297 * LOCKING:
298 * None.
299 *
300 * RETURNS:
301 * Matching XFER_* value, 0 if no match found.
302 */
303 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
304 {
305 int highbit = fls(xfer_mask) - 1;
306 const struct ata_xfer_ent *ent;
307
308 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
309 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
310 return ent->base + highbit - ent->shift;
311 return 0;
312 }
313
314 /**
315 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
316 * @xfer_mode: XFER_* of interest
317 *
318 * Return matching xfer_mask for @xfer_mode.
319 *
320 * LOCKING:
321 * None.
322 *
323 * RETURNS:
324 * Matching xfer_mask, 0 if no match found.
325 */
326 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
327 {
328 const struct ata_xfer_ent *ent;
329
330 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
331 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
332 return 1 << (ent->shift + xfer_mode - ent->base);
333 return 0;
334 }
335
336 /**
337 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
338 * @xfer_mode: XFER_* of interest
339 *
340 * Return matching xfer_shift for @xfer_mode.
341 *
342 * LOCKING:
343 * None.
344 *
345 * RETURNS:
346 * Matching xfer_shift, -1 if no match found.
347 */
348 static int ata_xfer_mode2shift(unsigned int xfer_mode)
349 {
350 const struct ata_xfer_ent *ent;
351
352 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
353 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
354 return ent->shift;
355 return -1;
356 }
357
358 /**
359 * ata_mode_string - convert xfer_mask to string
360 * @xfer_mask: mask of bits supported; only highest bit counts.
361 *
362 * Determine the string which represents the highest speed
363 * (highest bit in @xfer_mask).
364 *
365 * LOCKING:
366 * None.
367 *
368 * RETURNS:
369 * Constant C string representing highest speed listed in
370 * @xfer_mask, or the constant C string "<n/a>".
371 */
372 static const char *ata_mode_string(unsigned int xfer_mask)
373 {
374 static const char * const xfer_mode_str[] = {
375 "PIO0",
376 "PIO1",
377 "PIO2",
378 "PIO3",
379 "PIO4",
380 "MWDMA0",
381 "MWDMA1",
382 "MWDMA2",
383 "UDMA/16",
384 "UDMA/25",
385 "UDMA/33",
386 "UDMA/44",
387 "UDMA/66",
388 "UDMA/100",
389 "UDMA/133",
390 "UDMA7",
391 };
392 int highbit;
393
394 highbit = fls(xfer_mask) - 1;
395 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
396 return xfer_mode_str[highbit];
397 return "<n/a>";
398 }
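
/*
 * Illustrative sketch (not in the original source): packing the per-type
 * masks, then reporting the best mode.  The mask values are made up for
 * the example: PIO0-4, MWDMA0-2 and UDMA0-5.
 */
static inline void ata_example_report_best_mode(void)
{
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	/* highest bit is UDMA5, so this prints "UDMA/100" / XFER_UDMA_5 */
	DPRINTK("best mode %s (XFER 0x%x)\n", ata_mode_string(xfer_mask),
		ata_xfer_mask2mode(xfer_mask));
}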
399
400 static const char *sata_spd_string(unsigned int spd)
401 {
402 static const char * const spd_str[] = {
403 "1.5 Gbps",
404 "3.0 Gbps",
405 };
406
407 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
408 return "<unknown>";
409 return spd_str[spd - 1];
410 }
411
412 void ata_dev_disable(struct ata_device *dev)
413 {
414 if (ata_dev_enabled(dev)) {
415 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
416 dev->class++;
417 }
418 }
419
420 /**
421 * ata_pio_devchk - PATA device presence detection
422 * @ap: ATA channel to examine
423 * @device: Device to examine (starting at zero)
424 *
425 * This technique was originally described in
426 * Hale Landis's ATADRVR (www.ata-atapi.com), and
427 * later found its way into the ATA/ATAPI spec.
428 *
429 * Write a pattern to the ATA shadow registers,
430 * and if a device is present, it will respond by
431 * correctly storing and echoing back the
432 * ATA shadow register contents.
433 *
434 * LOCKING:
435 * caller.
436 */
437
438 static unsigned int ata_pio_devchk(struct ata_port *ap,
439 unsigned int device)
440 {
441 struct ata_ioports *ioaddr = &ap->ioaddr;
442 u8 nsect, lbal;
443
444 ap->ops->dev_select(ap, device);
445
446 outb(0x55, ioaddr->nsect_addr);
447 outb(0xaa, ioaddr->lbal_addr);
448
449 outb(0xaa, ioaddr->nsect_addr);
450 outb(0x55, ioaddr->lbal_addr);
451
452 outb(0x55, ioaddr->nsect_addr);
453 outb(0xaa, ioaddr->lbal_addr);
454
455 nsect = inb(ioaddr->nsect_addr);
456 lbal = inb(ioaddr->lbal_addr);
457
458 if ((nsect == 0x55) && (lbal == 0xaa))
459 return 1; /* we found a device */
460
461 return 0; /* nothing found */
462 }
463
464 /**
465 * ata_mmio_devchk - PATA device presence detection
466 * @ap: ATA channel to examine
467 * @device: Device to examine (starting at zero)
468 *
469 * This technique was originally described in
470 * Hale Landis's ATADRVR (www.ata-atapi.com), and
471 * later found its way into the ATA/ATAPI spec.
472 *
473 * Write a pattern to the ATA shadow registers,
474 * and if a device is present, it will respond by
475 * correctly storing and echoing back the
476 * ATA shadow register contents.
477 *
478 * LOCKING:
479 * caller.
480 */
481
482 static unsigned int ata_mmio_devchk(struct ata_port *ap,
483 unsigned int device)
484 {
485 struct ata_ioports *ioaddr = &ap->ioaddr;
486 u8 nsect, lbal;
487
488 ap->ops->dev_select(ap, device);
489
490 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
491 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
492
493 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
494 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
495
496 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
497 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
498
499 nsect = readb((void __iomem *) ioaddr->nsect_addr);
500 lbal = readb((void __iomem *) ioaddr->lbal_addr);
501
502 if ((nsect == 0x55) && (lbal == 0xaa))
503 return 1; /* we found a device */
504
505 return 0; /* nothing found */
506 }
507
508 /**
509 * ata_devchk - PATA device presence detection
510 * @ap: ATA channel to examine
511 * @device: Device to examine (starting at zero)
512 *
513 * Dispatch ATA device presence detection, depending
514 * on whether we are using PIO or MMIO to talk to the
515 * ATA shadow registers.
516 *
517 * LOCKING:
518 * caller.
519 */
520
521 static unsigned int ata_devchk(struct ata_port *ap,
522 unsigned int device)
523 {
524 if (ap->flags & ATA_FLAG_MMIO)
525 return ata_mmio_devchk(ap, device);
526 return ata_pio_devchk(ap, device);
527 }
528
529 /**
530 * ata_dev_classify - determine device type based on ATA-spec signature
531 * @tf: ATA taskfile register set for device to be identified
532 *
533 * Determine from taskfile register contents whether a device is
534 * ATA or ATAPI, as per "Signature and persistence" section
535 * of ATA/PI spec (volume 1, sect 5.14).
536 *
537 * LOCKING:
538 * None.
539 *
540 * RETURNS:
541 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
542 * in the event of failure.
543 */
544
545 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
546 {
547 /* Apple's open source Darwin code hints that some devices only
548 * put a proper signature into the LBA mid/high registers,
549 * so we check only those; it's sufficient for uniqueness.
550 */
551
552 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
553 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
554 DPRINTK("found ATA device by sig\n");
555 return ATA_DEV_ATA;
556 }
557
558 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
559 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
560 DPRINTK("found ATAPI device by sig\n");
561 return ATA_DEV_ATAPI;
562 }
563
564 DPRINTK("unknown device\n");
565 return ATA_DEV_UNKNOWN;
566 }
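
/*
 * Worked example (added for clarity): after reset an ATAPI device leaves
 * 0x14/0xeb (or 0x69/0x96) in the LBA mid/high shadow registers, so a
 * taskfile read back via ->tf_read() classifies as ATA_DEV_ATAPI, while
 * 0x00/0x00 or 0x3c/0xc3 classifies as ATA_DEV_ATA.
 */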
567
568 /**
569 * ata_dev_try_classify - Parse returned ATA device signature
570 * @ap: ATA channel to examine
571 * @device: Device to examine (starting at zero)
572 * @r_err: Value of error register on completion
573 *
574 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
575 * an ATA/ATAPI-defined set of values is placed in the ATA
576 * shadow registers, indicating the results of device detection
577 * and diagnostics.
578 *
579 * Select the ATA device, and read the values from the ATA shadow
580 * registers. Then parse according to the Error register value,
581 * and the spec-defined values examined by ata_dev_classify().
582 *
583 * LOCKING:
584 * caller.
585 *
586 * RETURNS:
587 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
588 */
589
590 static unsigned int
591 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
592 {
593 struct ata_taskfile tf;
594 unsigned int class;
595 u8 err;
596
597 ap->ops->dev_select(ap, device);
598
599 memset(&tf, 0, sizeof(tf));
600
601 ap->ops->tf_read(ap, &tf);
602 err = tf.feature;
603 if (r_err)
604 *r_err = err;
605
606 /* see if device passed diags */
607 if (err == 1)
608 /* do nothing */ ;
609 else if ((device == 0) && (err == 0x81))
610 /* do nothing */ ;
611 else
612 return ATA_DEV_NONE;
613
614 /* determine if device is ATA or ATAPI */
615 class = ata_dev_classify(&tf);
616
617 if (class == ATA_DEV_UNKNOWN)
618 return ATA_DEV_NONE;
619 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
620 return ATA_DEV_NONE;
621 return class;
622 }
623
624 /**
625 * ata_id_string - Convert IDENTIFY DEVICE page into string
626 * @id: IDENTIFY DEVICE results we will examine
627 * @s: string into which data is output
628 * @ofs: offset into identify device page
629 * @len: length of string to return. must be an even number.
630 *
631 * The strings in the IDENTIFY DEVICE page are broken up into
632 * 16-bit chunks. Run through the string, and output each
633 * 8-bit chunk linearly, regardless of platform.
634 *
635 * LOCKING:
636 * caller.
637 */
638
639 void ata_id_string(const u16 *id, unsigned char *s,
640 unsigned int ofs, unsigned int len)
641 {
642 unsigned int c;
643
644 while (len > 0) {
645 c = id[ofs] >> 8;
646 *s = c;
647 s++;
648
649 c = id[ofs] & 0xff;
650 *s = c;
651 s++;
652
653 ofs++;
654 len -= 2;
655 }
656 }
657
658 /**
659 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
660 * @id: IDENTIFY DEVICE results we will examine
661 * @s: string into which data is output
662 * @ofs: offset into identify device page
663 * @len: length of string to return. must be an odd number.
664 *
665 * This function is identical to ata_id_string except that it
666 * trims trailing spaces and terminates the resulting string with
667 * null. @len must be actual maximum length (even number) + 1.
668 *
669 * LOCKING:
670 * caller.
671 */
672 void ata_id_c_string(const u16 *id, unsigned char *s,
673 unsigned int ofs, unsigned int len)
674 {
675 unsigned char *p;
676
677 WARN_ON(!(len & 1));
678
679 ata_id_string(id, s, ofs, len - 1);
680
681 p = s + strnlen(s, len - 1);
682 while (p > s && p[-1] == ' ')
683 p--;
684 *p = '\0';
685 }
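
/*
 * Illustrative sketch (not in the original source): extracting the model
 * number (IDENTIFY words 27-46) as a trimmed C string.  The word offset
 * comes from the ATA spec; the buffer follows the "maximum length + 1"
 * rule documented above.
 */
static inline void ata_example_dump_model(const u16 *id)
{
	unsigned char model[41];	/* 40 model bytes + '\0' */

	ata_id_c_string(id, model, 27, sizeof(model));
	DPRINTK("model number: %s\n", model);
}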
686
687 static u64 ata_id_n_sectors(const u16 *id)
688 {
689 if (ata_id_has_lba(id)) {
690 if (ata_id_has_lba48(id))
691 return ata_id_u64(id, 100);
692 else
693 return ata_id_u32(id, 60);
694 } else {
695 if (ata_id_current_chs_valid(id))
696 return ata_id_u32(id, 57);
697 else
698 return id[1] * id[3] * id[6];
699 }
700 }
701
702 /**
703 * ata_noop_dev_select - Select device 0/1 on ATA bus
704 * @ap: ATA channel to manipulate
705 * @device: ATA device (numbered from zero) to select
706 *
707 * This function performs no operation.
708 *
709 * May be used as the dev_select() entry in ata_port_operations.
710 *
711 * LOCKING:
712 * caller.
713 */
714 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
715 {
716 }
717
718
719 /**
720 * ata_std_dev_select - Select device 0/1 on ATA bus
721 * @ap: ATA channel to manipulate
722 * @device: ATA device (numbered from zero) to select
723 *
724 * Use the method defined in the ATA specification to
725 * make either device 0, or device 1, active on the
726 * ATA channel. Works with both PIO and MMIO.
727 *
728 * May be used as the dev_select() entry in ata_port_operations.
729 *
730 * LOCKING:
731 * caller.
732 */
733
734 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
735 {
736 u8 tmp;
737
738 if (device == 0)
739 tmp = ATA_DEVICE_OBS;
740 else
741 tmp = ATA_DEVICE_OBS | ATA_DEV1;
742
743 if (ap->flags & ATA_FLAG_MMIO) {
744 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
745 } else {
746 outb(tmp, ap->ioaddr.device_addr);
747 }
748 ata_pause(ap); /* needed; also flushes, for mmio */
749 }
750
751 /**
752 * ata_dev_select - Select device 0/1 on ATA bus
753 * @ap: ATA channel to manipulate
754 * @device: ATA device (numbered from zero) to select
755 * @wait: non-zero to wait for Status register BSY bit to clear
756 * @can_sleep: non-zero if context allows sleeping
757 *
758 * Use the method defined in the ATA specification to
759 * make either device 0, or device 1, active on the
760 * ATA channel.
761 *
762 * This is a high-level version of ata_std_dev_select(),
763 * which additionally provides the services of inserting
764 * the proper pauses and status polling, where needed.
765 *
766 * LOCKING:
767 * caller.
768 */
769
770 void ata_dev_select(struct ata_port *ap, unsigned int device,
771 unsigned int wait, unsigned int can_sleep)
772 {
773 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
774 ap->id, device, wait);
775
776 if (wait)
777 ata_wait_idle(ap);
778
779 ap->ops->dev_select(ap, device);
780
781 if (wait) {
782 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
783 msleep(150);
784 ata_wait_idle(ap);
785 }
786 }
787
788 /**
789 * ata_dump_id - IDENTIFY DEVICE info debugging output
790 * @id: IDENTIFY DEVICE page to dump
791 *
792 * Dump selected 16-bit words from the given IDENTIFY DEVICE
793 * page.
794 *
795 * LOCKING:
796 * caller.
797 */
798
799 static inline void ata_dump_id(const u16 *id)
800 {
801 DPRINTK("49==0x%04x "
802 "53==0x%04x "
803 "63==0x%04x "
804 "64==0x%04x "
805 "75==0x%04x \n",
806 id[49],
807 id[53],
808 id[63],
809 id[64],
810 id[75]);
811 DPRINTK("80==0x%04x "
812 "81==0x%04x "
813 "82==0x%04x "
814 "83==0x%04x "
815 "84==0x%04x \n",
816 id[80],
817 id[81],
818 id[82],
819 id[83],
820 id[84]);
821 DPRINTK("88==0x%04x "
822 "93==0x%04x\n",
823 id[88],
824 id[93]);
825 }
826
827 /**
828 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
829 * @id: IDENTIFY data to compute xfer mask from
830 *
831 * Compute the xfermask for this device. This is not as trivial
832 * as it seems if we must consider early devices correctly.
833 *
834 * FIXME: pre IDE drive timing (do we care ?).
835 *
836 * LOCKING:
837 * None.
838 *
839 * RETURNS:
840 * Computed xfermask
841 */
842 static unsigned int ata_id_xfermask(const u16 *id)
843 {
844 unsigned int pio_mask, mwdma_mask, udma_mask;
845
846 /* Usual case. Word 53 indicates word 64 is valid */
847 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
848 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
849 pio_mask <<= 3;
850 pio_mask |= 0x7;
851 } else {
852 /* If word 64 isn't valid then Word 51 high byte holds
853 * the PIO timing number for the maximum. Turn it into
854 * a mask.
855 */
856 pio_mask = (2 << ((id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
857
858 /* But wait.. there's more. Design your standards by
859 * committee and you too can get a free iordy field to
860 * process. However, it's the speeds, not the modes, that
861 * are supported... Note drivers using the timing API
862 * will get this right anyway
863 */
864 }
865
866 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
867
868 udma_mask = 0;
869 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
870 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
871
872 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
873 }
874
875 /**
876 * ata_port_queue_task - Queue port_task
877 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value passed to the workqueue function
 * @delay: delay time in jiffies before the function is scheduled
878 *
879 * Schedule @fn(@data) for execution after @delay jiffies using
880 * port_task. There is one port_task per port and it's the
881 * user's (low-level driver's) responsibility to make sure that only
882 * one task is active at any given time.
883 *
884 * libata core layer takes care of synchronization between
885 * port_task and EH. ata_port_queue_task() may be ignored for EH
886 * synchronization.
887 *
888 * LOCKING:
889 * Inherited from caller.
890 */
891 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
892 unsigned long delay)
893 {
894 int rc;
895
896 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
897 return;
898
899 PREPARE_WORK(&ap->port_task, fn, data);
900
901 if (!delay)
902 rc = queue_work(ata_wq, &ap->port_task);
903 else
904 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
905
906 /* rc == 0 means that another user is using port task */
907 WARN_ON(rc == 0);
908 }
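
/*
 * Illustrative sketch (not in the original source): how a low-level driver
 * might schedule its PIO state machine through port_task.  my_pio_task is
 * a hypothetical handler, not a real libata symbol.
 */
static inline void ata_example_schedule_pio(struct ata_port *ap,
					    void (*my_pio_task)(void *),
					    unsigned long delay)
{
	/*
	 * Only one task may be pending per port.  Pass delay == 0 to run
	 * as soon as the workqueue gets to it, or e.g. msecs_to_jiffies(10)
	 * to poll again a little later.
	 */
	ata_port_queue_task(ap, my_pio_task, ap, delay);
}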
909
910 /**
911 * ata_port_flush_task - Flush port_task
912 * @ap: The ata_port to flush port_task for
913 *
914 * After this function completes, port_task is guaranteed not to
915 * be running or scheduled.
916 *
917 * LOCKING:
918 * Kernel thread context (may sleep)
919 */
920 void ata_port_flush_task(struct ata_port *ap)
921 {
922 unsigned long flags;
923
924 DPRINTK("ENTER\n");
925
926 spin_lock_irqsave(&ap->host_set->lock, flags);
927 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
928 spin_unlock_irqrestore(&ap->host_set->lock, flags);
929
930 DPRINTK("flush #1\n");
931 flush_workqueue(ata_wq);
932
933 /*
934 * At this point, if a task is running, it's guaranteed to see
935 * the FLUSH flag; thus, it will never queue pio tasks again.
936 * Cancel and flush.
937 */
938 if (!cancel_delayed_work(&ap->port_task)) {
939 DPRINTK("flush #2\n");
940 flush_workqueue(ata_wq);
941 }
942
943 spin_lock_irqsave(&ap->host_set->lock, flags);
944 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
945 spin_unlock_irqrestore(&ap->host_set->lock, flags);
946
947 DPRINTK("EXIT\n");
948 }
949
950 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
951 {
952 struct completion *waiting = qc->private_data;
953
954 complete(waiting);
955 }
956
957 /**
958 * ata_exec_internal - execute libata internal command
959 * @dev: Device to which the command is sent
960 * @tf: Taskfile registers for the command and the result
961 * @cdb: CDB for packet command
962 * @dma_dir: Data transfer direction of the command
963 * @buf: Data buffer of the command
964 * @buflen: Length of data buffer
965 *
966 * Executes libata internal command with timeout. @tf contains
967 * command on entry and result on return. Timeout and error
968 * conditions are reported via return value. No recovery action
969 * is taken after a command times out. It's the caller's duty to
970 * clean up after a timeout.
971 *
972 * LOCKING:
973 * None. Should be called with kernel context, might sleep.
974 */
975
976 unsigned ata_exec_internal(struct ata_device *dev,
977 struct ata_taskfile *tf, const u8 *cdb,
978 int dma_dir, void *buf, unsigned int buflen)
979 {
980 struct ata_port *ap = dev->ap;
981 u8 command = tf->command;
982 struct ata_queued_cmd *qc;
983 unsigned int tag, preempted_tag;
984 DECLARE_COMPLETION(wait);
985 unsigned long flags;
986 unsigned int err_mask;
987 int rc;
988
989 spin_lock_irqsave(&ap->host_set->lock, flags);
990
991 /* no internal command while frozen */
992 if (ap->flags & ATA_FLAG_FROZEN) {
993 spin_unlock_irqrestore(&ap->host_set->lock, flags);
994 return AC_ERR_SYSTEM;
995 }
996
997 /* initialize internal qc */
998
999 /* XXX: Tag 0 is used for drivers with legacy EH as some
1000 * drivers choke if any other tag is given. This breaks
1001 * ata_tag_internal() test for those drivers. Don't use new
1002 * EH stuff without converting to it.
1003 */
1004 if (ap->ops->error_handler)
1005 tag = ATA_TAG_INTERNAL;
1006 else
1007 tag = 0;
1008
1009 if (test_and_set_bit(tag, &ap->qactive))
1010 BUG();
1011 qc = __ata_qc_from_tag(ap, tag);
1012
1013 qc->tag = tag;
1014 qc->scsicmd = NULL;
1015 qc->ap = ap;
1016 qc->dev = dev;
1017 ata_qc_reinit(qc);
1018
1019 preempted_tag = ap->active_tag;
1020 ap->active_tag = ATA_TAG_POISON;
1021
1022 /* prepare & issue qc */
1023 qc->tf = *tf;
1024 if (cdb)
1025 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1026 qc->flags |= ATA_QCFLAG_RESULT_TF;
1027 qc->dma_dir = dma_dir;
1028 if (dma_dir != DMA_NONE) {
1029 ata_sg_init_one(qc, buf, buflen);
1030 qc->nsect = buflen / ATA_SECT_SIZE;
1031 }
1032
1033 qc->private_data = &wait;
1034 qc->complete_fn = ata_qc_complete_internal;
1035
1036 ata_qc_issue(qc);
1037
1038 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1039
1040 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
1041
1042 ata_port_flush_task(ap);
1043
1044 if (!rc) {
1045 spin_lock_irqsave(&ap->host_set->lock, flags);
1046
1047 /* We're racing with irq here. If we lose, the
1048 * following test prevents us from completing the qc
1049 * twice. If we win, the port is frozen and will be
1050 * cleaned up by ->post_internal_cmd().
1051 */
1052 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1053 qc->err_mask |= AC_ERR_TIMEOUT;
1054
1055 if (ap->ops->error_handler)
1056 ata_port_freeze(ap);
1057 else
1058 ata_qc_complete(qc);
1059
1060 ata_dev_printk(dev, KERN_WARNING,
1061 "qc timeout (cmd 0x%x)\n", command);
1062 }
1063
1064 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1065 }
1066
1067 /* do post_internal_cmd */
1068 if (ap->ops->post_internal_cmd)
1069 ap->ops->post_internal_cmd(qc);
1070
1071 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1072 ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
1073 "internal command, assuming AC_ERR_OTHER\n");
1074 qc->err_mask |= AC_ERR_OTHER;
1075 }
1076
1077 /* finish up */
1078 spin_lock_irqsave(&ap->host_set->lock, flags);
1079
1080 *tf = qc->result_tf;
1081 err_mask = qc->err_mask;
1082
1083 ata_qc_free(qc);
1084 ap->active_tag = preempted_tag;
1085
1086 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1087 * Until those drivers are fixed, we detect the condition
1088 * here, fail the command with AC_ERR_SYSTEM and reenable the
1089 * port.
1090 *
1091 * Note that this doesn't change any behavior as internal
1092 * command failure results in disabling the device in the
1093 * higher layer for LLDDs without new reset/EH callbacks.
1094 *
1095 * Kill the following code as soon as those drivers are fixed.
1096 */
1097 if (ap->flags & ATA_FLAG_DISABLED) {
1098 err_mask |= AC_ERR_SYSTEM;
1099 ata_port_probe(ap);
1100 }
1101
1102 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1103
1104 return err_mask;
1105 }
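
/*
 * Illustrative sketch (not in the original source): issuing a simple
 * non-data command (FLUSH CACHE) through ata_exec_internal().  The flag
 * and protocol choices mirror other non-data users in libata but are
 * assumptions as far as this example is concerned.
 */
static inline unsigned int ata_example_flush_cache(struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_FLUSH;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	/* returns an AC_ERR_* mask, 0 on success */
	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}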
1106
1107 /**
1108 * ata_pio_need_iordy - check if iordy needed
1109 * @adev: ATA device
1110 *
1111 * Check if the current speed of the device requires IORDY. Used
1112 * by various controllers for chip configuration.
1113 */
1114
1115 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1116 {
1117 int pio;
1118 int speed = adev->pio_mode - XFER_PIO_0;
1119
1120 if (speed < 2)
1121 return 0;
1122 if (speed > 2)
1123 return 1;
1124
1125 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1126
1127 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1128 pio = adev->id[ATA_ID_EIDE_PIO];
1129 /* Is the speed faster than the drive allows non IORDY ? */
1130 if (pio) {
1131 /* This is cycle times not frequency - watch the logic! */
1132 if (pio > 240) /* PIO2 is 240nS per cycle */
1133 return 1;
1134 return 0;
1135 }
1136 }
1137 return 0;
1138 }
1139
1140 /**
1141 * ata_dev_read_id - Read ID data from the specified device
1142 * @dev: target device
1143 * @p_class: pointer to class of the target device (may be changed)
1144 * @post_reset: is this read ID post-reset?
1145 * @id: buffer to read IDENTIFY data into
1146 *
1147 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1148 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1149 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1150 * for pre-ATA4 drives.
1151 *
1152 * LOCKING:
1153 * Kernel thread context (may sleep)
1154 *
1155 * RETURNS:
1156 * 0 on success, -errno otherwise.
1157 */
1158 static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1159 int post_reset, u16 *id)
1160 {
1161 struct ata_port *ap = dev->ap;
1162 unsigned int class = *p_class;
1163 struct ata_taskfile tf;
1164 unsigned int err_mask = 0;
1165 const char *reason;
1166 int rc;
1167
1168 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1169
1170 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1171
1172 retry:
1173 ata_tf_init(dev, &tf);
1174
1175 switch (class) {
1176 case ATA_DEV_ATA:
1177 tf.command = ATA_CMD_ID_ATA;
1178 break;
1179 case ATA_DEV_ATAPI:
1180 tf.command = ATA_CMD_ID_ATAPI;
1181 break;
1182 default:
1183 rc = -ENODEV;
1184 reason = "unsupported class";
1185 goto err_out;
1186 }
1187
1188 tf.protocol = ATA_PROT_PIO;
1189
1190 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1191 id, sizeof(id[0]) * ATA_ID_WORDS);
1192 if (err_mask) {
1193 rc = -EIO;
1194 reason = "I/O error";
1195 goto err_out;
1196 }
1197
1198 swap_buf_le16(id, ATA_ID_WORDS);
1199
1200 /* sanity check */
1201 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1202 rc = -EINVAL;
1203 reason = "device reports illegal type";
1204 goto err_out;
1205 }
1206
1207 if (post_reset && class == ATA_DEV_ATA) {
1208 /*
1209 * The exact sequence expected by certain pre-ATA4 drives is:
1210 * SRST RESET
1211 * IDENTIFY
1212 * INITIALIZE DEVICE PARAMETERS
1213 * anything else..
1214 * Some drives were very specific about that exact sequence.
1215 */
1216 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1217 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1218 if (err_mask) {
1219 rc = -EIO;
1220 reason = "INIT_DEV_PARAMS failed";
1221 goto err_out;
1222 }
1223
1224 /* current CHS translation info (id[53-58]) might be
1225 * changed. reread the identify device info.
1226 */
1227 post_reset = 0;
1228 goto retry;
1229 }
1230 }
1231
1232 *p_class = class;
1233
1234 return 0;
1235
1236 err_out:
1237 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1238 "(%s, err_mask=0x%x)\n", reason, err_mask);
1239 return rc;
1240 }
1241
1242 static inline u8 ata_dev_knobble(struct ata_device *dev)
1243 {
1244 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1245 }
1246
1247 /**
1248 * ata_dev_configure - Configure the specified ATA/ATAPI device
1249 * @dev: Target device to configure
1250 * @print_info: Enable device info printout
1251 *
1252 * Configure @dev according to @dev->id. Generic and low-level
1253 * driver specific fixups are also applied.
1254 *
1255 * LOCKING:
1256 * Kernel thread context (may sleep)
1257 *
1258 * RETURNS:
1259 * 0 on success, -errno otherwise
1260 */
1261 static int ata_dev_configure(struct ata_device *dev, int print_info)
1262 {
1263 struct ata_port *ap = dev->ap;
1264 const u16 *id = dev->id;
1265 unsigned int xfer_mask;
1266 int i, rc;
1267
1268 if (!ata_dev_enabled(dev)) {
1269 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1270 ap->id, dev->devno);
1271 return 0;
1272 }
1273
1274 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1275
1276 /* print device capabilities */
1277 if (print_info)
1278 ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
1279 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1280 id[49], id[82], id[83], id[84],
1281 id[85], id[86], id[87], id[88]);
1282
1283 /* initialize to-be-configured parameters */
1284 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1285 dev->max_sectors = 0;
1286 dev->cdb_len = 0;
1287 dev->n_sectors = 0;
1288 dev->cylinders = 0;
1289 dev->heads = 0;
1290 dev->sectors = 0;
1291
1292 /*
1293 * common ATA, ATAPI feature tests
1294 */
1295
1296 /* find max transfer mode; for printk only */
1297 xfer_mask = ata_id_xfermask(id);
1298
1299 ata_dump_id(id);
1300
1301 /* ATA-specific feature tests */
1302 if (dev->class == ATA_DEV_ATA) {
1303 dev->n_sectors = ata_id_n_sectors(id);
1304
1305 if (ata_id_has_lba(id)) {
1306 const char *lba_desc;
1307
1308 lba_desc = "LBA";
1309 dev->flags |= ATA_DFLAG_LBA;
1310 if (ata_id_has_lba48(id)) {
1311 dev->flags |= ATA_DFLAG_LBA48;
1312 lba_desc = "LBA48";
1313 }
1314
1315 /* print device info to dmesg */
1316 if (print_info)
1317 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1318 "max %s, %Lu sectors: %s\n",
1319 ata_id_major_version(id),
1320 ata_mode_string(xfer_mask),
1321 (unsigned long long)dev->n_sectors,
1322 lba_desc);
1323 } else {
1324 /* CHS */
1325
1326 /* Default translation */
1327 dev->cylinders = id[1];
1328 dev->heads = id[3];
1329 dev->sectors = id[6];
1330
1331 if (ata_id_current_chs_valid(id)) {
1332 /* Current CHS translation is valid. */
1333 dev->cylinders = id[54];
1334 dev->heads = id[55];
1335 dev->sectors = id[56];
1336 }
1337
1338 /* print device info to dmesg */
1339 if (print_info)
1340 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1341 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1342 ata_id_major_version(id),
1343 ata_mode_string(xfer_mask),
1344 (unsigned long long)dev->n_sectors,
1345 dev->cylinders, dev->heads, dev->sectors);
1346 }
1347
1348 dev->cdb_len = 16;
1349 }
1350
1351 /* ATAPI-specific feature tests */
1352 else if (dev->class == ATA_DEV_ATAPI) {
1353 rc = atapi_cdb_len(id);
1354 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1355 ata_dev_printk(dev, KERN_WARNING,
1356 "unsupported CDB len\n");
1357 rc = -EINVAL;
1358 goto err_out_nosup;
1359 }
1360 dev->cdb_len = (unsigned int) rc;
1361
1362 /* print device info to dmesg */
1363 if (print_info)
1364 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s\n",
1365 ata_mode_string(xfer_mask));
1366 }
1367
1368 ap->host->max_cmd_len = 0;
1369 for (i = 0; i < ATA_MAX_DEVICES; i++)
1370 ap->host->max_cmd_len = max_t(unsigned int,
1371 ap->host->max_cmd_len,
1372 ap->device[i].cdb_len);
1373
1374 /* limit bridge transfers to udma5, 200 sectors */
1375 if (ata_dev_knobble(dev)) {
1376 if (print_info)
1377 ata_dev_printk(dev, KERN_INFO,
1378 "applying bridge limits\n");
1379 dev->udma_mask &= ATA_UDMA5;
1380 dev->max_sectors = ATA_MAX_SECTORS;
1381 }
1382
1383 if (ap->ops->dev_config)
1384 ap->ops->dev_config(ap, dev);
1385
1386 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1387 return 0;
1388
1389 err_out_nosup:
1390 DPRINTK("EXIT, err\n");
1391 return rc;
1392 }
1393
1394 /**
1395 * ata_bus_probe - Reset and probe ATA bus
1396 * @ap: Bus to probe
1397 *
1398 * Master ATA bus probing function. Initiates a hardware-dependent
1399 * bus reset, then attempts to identify any devices found on
1400 * the bus.
1401 *
1402 * LOCKING:
1403 * PCI/etc. bus probe sem.
1404 *
1405 * RETURNS:
1406 * Zero on success, negative errno otherwise.
1407 */
1408
1409 static int ata_bus_probe(struct ata_port *ap)
1410 {
1411 unsigned int classes[ATA_MAX_DEVICES];
1412 int tries[ATA_MAX_DEVICES];
1413 int i, rc, down_xfermask;
1414 struct ata_device *dev;
1415
1416 ata_port_probe(ap);
1417
1418 for (i = 0; i < ATA_MAX_DEVICES; i++)
1419 tries[i] = ATA_PROBE_MAX_TRIES;
1420
1421 retry:
1422 down_xfermask = 0;
1423
1424 /* reset and determine device classes */
1425 for (i = 0; i < ATA_MAX_DEVICES; i++)
1426 classes[i] = ATA_DEV_UNKNOWN;
1427
1428 if (ap->ops->probe_reset) {
1429 rc = ap->ops->probe_reset(ap, classes);
1430 if (rc) {
1431 ata_port_printk(ap, KERN_ERR,
1432 "reset failed (errno=%d)\n", rc);
1433 return rc;
1434 }
1435 } else {
1436 ap->ops->phy_reset(ap);
1437
1438 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1439 if (!(ap->flags & ATA_FLAG_DISABLED))
1440 classes[i] = ap->device[i].class;
1441 ap->device[i].class = ATA_DEV_UNKNOWN;
1442 }
1443
1444 ata_port_probe(ap);
1445 }
1446
1447 for (i = 0; i < ATA_MAX_DEVICES; i++)
1448 if (classes[i] == ATA_DEV_UNKNOWN)
1449 classes[i] = ATA_DEV_NONE;
1450
1451 /* read IDENTIFY page and configure devices */
1452 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1453 dev = &ap->device[i];
1454
1455 if (tries[i])
1456 dev->class = classes[i];
1457
1458 if (!ata_dev_enabled(dev))
1459 continue;
1460
1461 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1462 if (rc)
1463 goto fail;
1464
1465 rc = ata_dev_configure(dev, 1);
1466 if (rc)
1467 goto fail;
1468 }
1469
1470 /* configure transfer mode */
1471 rc = ata_set_mode(ap, &dev);
1472 if (rc) {
1473 down_xfermask = 1;
1474 goto fail;
1475 }
1476
1477 for (i = 0; i < ATA_MAX_DEVICES; i++)
1478 if (ata_dev_enabled(&ap->device[i]))
1479 return 0;
1480
1481 /* no device present, disable port */
1482 ata_port_disable(ap);
1483 ap->ops->port_disable(ap);
1484 return -ENODEV;
1485
1486 fail:
1487 switch (rc) {
1488 case -EINVAL:
1489 case -ENODEV:
1490 tries[dev->devno] = 0;
1491 break;
1492 case -EIO:
1493 sata_down_spd_limit(ap);
1494 /* fall through */
1495 default:
1496 tries[dev->devno]--;
1497 if (down_xfermask &&
1498 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1499 tries[dev->devno] = 0;
1500 }
1501
1502 if (!tries[dev->devno]) {
1503 ata_down_xfermask_limit(dev, 1);
1504 ata_dev_disable(dev);
1505 }
1506
1507 goto retry;
1508 }
1509
1510 /**
1511 * ata_port_probe - Mark port as enabled
1512 * @ap: Port for which we indicate enablement
1513 *
1514 * Modify @ap data structure such that the system
1515 * thinks that the entire port is enabled.
1516 *
1517 * LOCKING: host_set lock, or some other form of
1518 * serialization.
1519 */
1520
1521 void ata_port_probe(struct ata_port *ap)
1522 {
1523 ap->flags &= ~ATA_FLAG_DISABLED;
1524 }
1525
1526 /**
1527 * sata_print_link_status - Print SATA link status
1528 * @ap: SATA port to printk link status about
1529 *
1530 * This function prints link speed and status of a SATA link.
1531 *
1532 * LOCKING:
1533 * None.
1534 */
1535 static void sata_print_link_status(struct ata_port *ap)
1536 {
1537 u32 sstatus, scontrol, tmp;
1538
1539 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1540 return;
1541 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1542
1543 if (ata_port_online(ap)) {
1544 tmp = (sstatus >> 4) & 0xf;
1545 ata_port_printk(ap, KERN_INFO,
1546 "SATA link up %s (SStatus %X SControl %X)\n",
1547 sata_spd_string(tmp), sstatus, scontrol);
1548 } else {
1549 ata_port_printk(ap, KERN_INFO,
1550 "SATA link down (SStatus %X SControl %X)\n",
1551 sstatus, scontrol);
1552 }
1553 }
1554
1555 /**
1556 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1557 * @ap: SATA port associated with target SATA PHY.
1558 *
1559 * This function issues commands to standard SATA Sxxx
1560 * PHY registers, to wake up the phy (and device), and
1561 * clear any reset condition.
1562 *
1563 * LOCKING:
1564 * PCI/etc. bus probe sem.
1565 *
1566 */
1567 void __sata_phy_reset(struct ata_port *ap)
1568 {
1569 u32 sstatus;
1570 unsigned long timeout = jiffies + (HZ * 5);
1571
1572 if (ap->flags & ATA_FLAG_SATA_RESET) {
1573 /* issue phy wake/reset */
1574 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1575 /* Couldn't find anything in SATA I/II specs, but
1576 * AHCI-1.1 10.4.2 says at least 1 ms. */
1577 mdelay(1);
1578 }
1579 /* phy wake/clear reset */
1580 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1581
1582 /* wait for phy to become ready, if necessary */
1583 do {
1584 msleep(200);
1585 sata_scr_read(ap, SCR_STATUS, &sstatus);
1586 if ((sstatus & 0xf) != 1)
1587 break;
1588 } while (time_before(jiffies, timeout));
1589
1590 /* print link status */
1591 sata_print_link_status(ap);
1592
1593 /* TODO: phy layer with polling, timeouts, etc. */
1594 if (!ata_port_offline(ap))
1595 ata_port_probe(ap);
1596 else
1597 ata_port_disable(ap);
1598
1599 if (ap->flags & ATA_FLAG_DISABLED)
1600 return;
1601
1602 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1603 ata_port_disable(ap);
1604 return;
1605 }
1606
1607 ap->cbl = ATA_CBL_SATA;
1608 }
1609
1610 /**
1611 * sata_phy_reset - Reset SATA bus.
1612 * @ap: SATA port associated with target SATA PHY.
1613 *
1614 * This function resets the SATA bus, and then probes
1615 * the bus for devices.
1616 *
1617 * LOCKING:
1618 * PCI/etc. bus probe sem.
1619 *
1620 */
1621 void sata_phy_reset(struct ata_port *ap)
1622 {
1623 __sata_phy_reset(ap);
1624 if (ap->flags & ATA_FLAG_DISABLED)
1625 return;
1626 ata_bus_reset(ap);
1627 }
1628
1629 /**
1630 * ata_dev_pair - return other device on cable
1631 * @adev: device
1632 *
1633 * Obtain the other device on the same cable; if none is
1634 * present, NULL is returned.
1635 */
1636
1637 struct ata_device *ata_dev_pair(struct ata_device *adev)
1638 {
1639 struct ata_port *ap = adev->ap;
1640 struct ata_device *pair = &ap->device[1 - adev->devno];
1641 if (!ata_dev_enabled(pair))
1642 return NULL;
1643 return pair;
1644 }
1645
1646 /**
1647 * ata_port_disable - Disable port.
1648 * @ap: Port to be disabled.
1649 *
1650 * Modify @ap data structure such that the system
1651 * thinks that the entire port is disabled, and should
1652 * never attempt to probe or communicate with devices
1653 * on this port.
1654 *
1655 * LOCKING: host_set lock, or some other form of
1656 * serialization.
1657 */
1658
1659 void ata_port_disable(struct ata_port *ap)
1660 {
1661 ap->device[0].class = ATA_DEV_NONE;
1662 ap->device[1].class = ATA_DEV_NONE;
1663 ap->flags |= ATA_FLAG_DISABLED;
1664 }
1665
1666 /**
1667 * sata_down_spd_limit - adjust SATA spd limit downward
1668 * @ap: Port to adjust SATA spd limit for
1669 *
1670 * Adjust SATA spd limit of @ap downward. Note that this
1671 * function only adjusts the limit. The change must be applied
1672 * using sata_set_spd().
1673 *
1674 * LOCKING:
1675 * Inherited from caller.
1676 *
1677 * RETURNS:
1678 * 0 on success, negative errno on failure
1679 */
1680 int sata_down_spd_limit(struct ata_port *ap)
1681 {
1682 u32 sstatus, spd, mask;
1683 int rc, highbit;
1684
1685 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1686 if (rc)
1687 return rc;
1688
1689 mask = ap->sata_spd_limit;
1690 if (mask <= 1)
1691 return -EINVAL;
1692 highbit = fls(mask) - 1;
1693 mask &= ~(1 << highbit);
1694
1695 spd = (sstatus >> 4) & 0xf;
1696 if (spd <= 1)
1697 return -EINVAL;
1698 spd--;
1699 mask &= (1 << spd) - 1;
1700 if (!mask)
1701 return -EINVAL;
1702
1703 ap->sata_spd_limit = mask;
1704
1705 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1706 sata_spd_string(fls(mask)));
1707
1708 return 0;
1709 }
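
/*
 * Worked example (added for clarity): with the link currently at 3.0 Gbps
 * (spd == 2) and sata_spd_limit == 0x3 (both generations allowed), the
 * limit mask drops to 0x1, so the next hardreset brings the link up at
 * 1.5 Gbps.
 */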
1710
1711 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1712 {
1713 u32 spd, limit;
1714
1715 if (ap->sata_spd_limit == UINT_MAX)
1716 limit = 0;
1717 else
1718 limit = fls(ap->sata_spd_limit);
1719
1720 spd = (*scontrol >> 4) & 0xf;
1721 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1722
1723 return spd != limit;
1724 }
1725
1726 /**
1727 * sata_set_spd_needed - is SATA spd configuration needed
1728 * @ap: Port in question
1729 *
1730 * Test whether the spd limit in SControl matches
1731 * @ap->sata_spd_limit. This function is used to determine
1732 * whether hardreset is necessary to apply SATA spd
1733 * configuration.
1734 *
1735 * LOCKING:
1736 * Inherited from caller.
1737 *
1738 * RETURNS:
1739 * 1 if SATA spd configuration is needed, 0 otherwise.
1740 */
1741 int sata_set_spd_needed(struct ata_port *ap)
1742 {
1743 u32 scontrol;
1744
1745 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1746 return 0;
1747
1748 return __sata_set_spd_needed(ap, &scontrol);
1749 }
1750
1751 /**
1752 * sata_set_spd - set SATA spd according to spd limit
1753 * @ap: Port to set SATA spd for
1754 *
1755 * Set SATA spd of @ap according to sata_spd_limit.
1756 *
1757 * LOCKING:
1758 * Inherited from caller.
1759 *
1760 * RETURNS:
1761 * 0 if spd doesn't need to be changed, 1 if spd has been
1762 * changed. Negative errno if SCR registers are inaccessible.
1763 */
1764 int sata_set_spd(struct ata_port *ap)
1765 {
1766 u32 scontrol;
1767 int rc;
1768
1769 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1770 return rc;
1771
1772 if (!__sata_set_spd_needed(ap, &scontrol))
1773 return 0;
1774
1775 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1776 return rc;
1777
1778 return 1;
1779 }
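
/*
 * Illustrative sketch (not in the original source): the typical way the
 * two helpers above are combined before a link reset - reprogram SControl
 * and request a hardreset only when the limit actually changed.
 */
static inline int ata_example_apply_spd_limit(struct ata_port *ap)
{
	if (!sata_set_spd_needed(ap))
		return 0;	/* SControl already matches the limit */

	/* returns 1 if SControl was rewritten, negative errno on failure */
	return sata_set_spd(ap);
}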
1780
1781 /*
1782 * This mode timing computation functionality is ported over from
1783 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1784 */
1785 /*
1786 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1787 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1788 * for PIO 5, which is a nonstandard extension and UDMA6, which
1789 * is currently supported only by Maxtor drives.
1790 */
1791
1792 static const struct ata_timing ata_timing[] = {
1793
1794 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1795 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1796 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1797 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1798
1799 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1800 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1801 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1802
1803 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1804
1805 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1806 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1807 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1808
1809 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1810 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1811 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1812
1813 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1814 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1815 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1816
1817 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1818 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1819 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1820
1821 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1822
1823 { 0xFF }
1824 };
1825
1826 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1827 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1828
1829 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1830 {
1831 q->setup = EZ(t->setup * 1000, T);
1832 q->act8b = EZ(t->act8b * 1000, T);
1833 q->rec8b = EZ(t->rec8b * 1000, T);
1834 q->cyc8b = EZ(t->cyc8b * 1000, T);
1835 q->active = EZ(t->active * 1000, T);
1836 q->recover = EZ(t->recover * 1000, T);
1837 q->cycle = EZ(t->cycle * 1000, T);
1838 q->udma = EZ(t->udma * 1000, UT);
1839 }
1840
1841 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1842 struct ata_timing *m, unsigned int what)
1843 {
1844 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1845 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1846 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1847 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1848 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1849 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1850 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1851 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1852 }
1853
1854 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1855 {
1856 const struct ata_timing *t;
1857
1858 for (t = ata_timing; t->mode != speed; t++)
1859 if (t->mode == 0xFF)
1860 return NULL;
1861 return t;
1862 }
1863
1864 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1865 struct ata_timing *t, int T, int UT)
1866 {
1867 const struct ata_timing *s;
1868 struct ata_timing p;
1869
1870 /*
1871 * Find the mode.
1872 */
1873
1874 if (!(s = ata_timing_find_mode(speed)))
1875 return -EINVAL;
1876
1877 memcpy(t, s, sizeof(*s));
1878
1879 /*
1880 * If the drive is an EIDE drive, it can tell us it needs extended
1881 * PIO/MW_DMA cycle timing.
1882 */
1883
1884 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1885 memset(&p, 0, sizeof(p));
1886 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1887 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1888 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1889 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1890 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1891 }
1892 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1893 }
1894
1895 /*
1896 * Convert the timing to bus clock counts.
1897 */
1898
1899 ata_timing_quantize(t, t, T, UT);
1900
1901 /*
1902 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1903 * S.M.A.R.T. and some other commands. We have to ensure that the
1904 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1905 */
1906
1907 if (speed > XFER_PIO_4) {
1908 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1909 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1910 }
1911
1912 /*
1913 * Lengthen active & recovery time so that cycle time is correct.
1914 */
1915
1916 if (t->act8b + t->rec8b < t->cyc8b) {
1917 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1918 t->rec8b = t->cyc8b - t->act8b;
1919 }
1920
1921 if (t->active + t->recover < t->cycle) {
1922 t->active += (t->cycle - (t->active + t->recover)) / 2;
1923 t->recover = t->cycle - t->active;
1924 }
1925
1926 return 0;
1927 }
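
/*
 * Illustrative sketch (not in the original source): quantizing the active
 * PIO mode of a device to a controller's bus clock.  The 30ns clock
 * period (roughly a 33 MHz bus) is an assumption for the example.
 */
static inline int ata_example_pio_timing(struct ata_device *adev,
					 struct ata_timing *t)
{
	/* T = UT = 30ns per clock; returns -EINVAL for an unknown mode */
	return ata_timing_compute(adev, adev->pio_mode, t, 30, 30);
}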
1928
1929 /**
1930 * ata_down_xfermask_limit - adjust dev xfer masks downward
1931 * @dev: Device to adjust xfer masks
1932 * @force_pio0: Force PIO0
1933 *
1934 * Adjust xfer masks of @dev downward. Note that this function
1935 * does not apply the change. Invoking ata_set_mode() afterwards
1936 * will apply the limit.
1937 *
1938 * LOCKING:
1939 * Inherited from caller.
1940 *
1941 * RETURNS:
1942 * 0 on success, negative errno on failure
1943 */
1944 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
1945 {
1946 unsigned long xfer_mask;
1947 int highbit;
1948
1949 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
1950 dev->udma_mask);
1951
1952 if (!xfer_mask)
1953 goto fail;
1954 /* don't gear down to MWDMA from UDMA, go directly to PIO */
1955 if (xfer_mask & ATA_MASK_UDMA)
1956 xfer_mask &= ~ATA_MASK_MWDMA;
1957
1958 highbit = fls(xfer_mask) - 1;
1959 xfer_mask &= ~(1 << highbit);
1960 if (force_pio0)
1961 xfer_mask &= 1 << ATA_SHIFT_PIO;
1962 if (!xfer_mask)
1963 goto fail;
1964
1965 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
1966 &dev->udma_mask);
1967
1968 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
1969 ata_mode_string(xfer_mask));
1970
1971 return 0;
1972
1973 fail:
1974 return -EINVAL;
1975 }
1976
1977 static int ata_dev_set_mode(struct ata_device *dev)
1978 {
1979 unsigned int err_mask;
1980 int rc;
1981
1982 dev->flags &= ~ATA_DFLAG_PIO;
1983 if (dev->xfer_shift == ATA_SHIFT_PIO)
1984 dev->flags |= ATA_DFLAG_PIO;
1985
1986 err_mask = ata_dev_set_xfermode(dev);
1987 if (err_mask) {
1988 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
1989 "(err_mask=0x%x)\n", err_mask);
1990 return -EIO;
1991 }
1992
1993 rc = ata_dev_revalidate(dev, 0);
1994 if (rc)
1995 return rc;
1996
1997 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1998 dev->xfer_shift, (int)dev->xfer_mode);
1999
2000 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2001 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2002 return 0;
2003 }
2004
2005 /**
2006 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2007 * @ap: port on which timings will be programmed
2008 * @r_failed_dev: out parameter for failed device
2009 *
2010 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2011 * ata_set_mode() fails, pointer to the failing device is
2012 * returned in @r_failed_dev.
2013 *
2014 * LOCKING:
2015 * PCI/etc. bus probe sem.
2016 *
2017 * RETURNS:
2018 * 0 on success, negative errno otherwise
2019 */
2020 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2021 {
2022 struct ata_device *dev;
2023 int i, rc = 0, used_dma = 0, found = 0;
2024
2025 /* has private set_mode? */
2026 if (ap->ops->set_mode) {
2027 /* FIXME: make ->set_mode handle no device case and
2028 * return error code and failing device on failure.
2029 */
2030 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2031 if (ata_dev_enabled(&ap->device[i])) {
2032 ap->ops->set_mode(ap);
2033 break;
2034 }
2035 }
2036 return 0;
2037 }
2038
2039 /* step 1: calculate xfer_mask */
2040 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2041 unsigned int pio_mask, dma_mask;
2042
2043 dev = &ap->device[i];
2044
2045 if (!ata_dev_enabled(dev))
2046 continue;
2047
2048 ata_dev_xfermask(dev);
2049
2050 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2051 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2052 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2053 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2054
2055 found = 1;
2056 if (dev->dma_mode)
2057 used_dma = 1;
2058 }
2059 if (!found)
2060 goto out;
2061
2062 /* step 2: always set host PIO timings */
2063 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2064 dev = &ap->device[i];
2065 if (!ata_dev_enabled(dev))
2066 continue;
2067
2068 if (!dev->pio_mode) {
2069 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2070 rc = -EINVAL;
2071 goto out;
2072 }
2073
2074 dev->xfer_mode = dev->pio_mode;
2075 dev->xfer_shift = ATA_SHIFT_PIO;
2076 if (ap->ops->set_piomode)
2077 ap->ops->set_piomode(ap, dev);
2078 }
2079
2080 /* step 3: set host DMA timings */
2081 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2082 dev = &ap->device[i];
2083
2084 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2085 continue;
2086
2087 dev->xfer_mode = dev->dma_mode;
2088 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2089 if (ap->ops->set_dmamode)
2090 ap->ops->set_dmamode(ap, dev);
2091 }
2092
2093 /* step 4: update devices' xfer mode */
2094 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2095 dev = &ap->device[i];
2096
2097 if (!ata_dev_enabled(dev))
2098 continue;
2099
2100 rc = ata_dev_set_mode(dev);
2101 if (rc)
2102 goto out;
2103 }
2104
2105 /* Record simplex status. If we selected DMA then the other
2106 * host channels are not permitted to do so.
2107 */
2108 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2109 ap->host_set->simplex_claimed = 1;
2110
2111 /* step 5: chip-specific finalisation */
2112 if (ap->ops->post_set_mode)
2113 ap->ops->post_set_mode(ap);
2114
2115 out:
2116 if (rc)
2117 *r_failed_dev = dev;
2118 return rc;
2119 }
2120
2121 /**
2122 * ata_tf_to_host - issue ATA taskfile to host controller
2123 * @ap: port to which command is being issued
2124 * @tf: ATA taskfile register set
2125 *
2126 * Issues ATA taskfile register set to ATA host controller,
2127 * with proper synchronization with interrupt handler and
2128 * other threads.
2129 *
2130 * LOCKING:
2131 * spin_lock_irqsave(host_set lock)
2132 */
2133
2134 static inline void ata_tf_to_host(struct ata_port *ap,
2135 const struct ata_taskfile *tf)
2136 {
2137 ap->ops->tf_load(ap, tf);
2138 ap->ops->exec_command(ap, tf);
2139 }
2140
2141 /**
2142 * ata_busy_sleep - sleep until BSY clears, or timeout
2143 * @ap: port containing status register to be polled
2144 * @tmout_pat: impatience timeout
2145 * @tmout: overall timeout
2146 *
2147 * Sleep until ATA Status register bit BSY clears,
2148 * or a timeout occurs.
2149 *
2150 * LOCKING: None.
2151 */
2152
2153 unsigned int ata_busy_sleep (struct ata_port *ap,
2154 unsigned long tmout_pat, unsigned long tmout)
2155 {
2156 unsigned long timer_start, timeout;
2157 u8 status;
2158
2159 status = ata_busy_wait(ap, ATA_BUSY, 300);
2160 timer_start = jiffies;
2161 timeout = timer_start + tmout_pat;
2162 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2163 msleep(50);
2164 status = ata_busy_wait(ap, ATA_BUSY, 3);
2165 }
2166
2167 if (status & ATA_BUSY)
2168 ata_port_printk(ap, KERN_WARNING,
2169 "port is slow to respond, please be patient\n");
2170
2171 timeout = timer_start + tmout;
2172 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2173 msleep(50);
2174 status = ata_chk_status(ap);
2175 }
2176
2177 if (status & ATA_BUSY) {
2178 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2179 "(%lu secs)\n", tmout / HZ);
2180 return 1;
2181 }
2182
2183 return 0;
2184 }
2185
2186 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2187 {
2188 struct ata_ioports *ioaddr = &ap->ioaddr;
2189 unsigned int dev0 = devmask & (1 << 0);
2190 unsigned int dev1 = devmask & (1 << 1);
2191 unsigned long timeout;
2192
2193 /* if device 0 was found in ata_devchk, wait for its
2194 * BSY bit to clear
2195 */
2196 if (dev0)
2197 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2198
2199 /* if device 1 was found in ata_devchk, wait for
2200 * register access, then wait for BSY to clear
2201 */
2202 timeout = jiffies + ATA_TMOUT_BOOT;
2203 while (dev1) {
2204 u8 nsect, lbal;
2205
2206 ap->ops->dev_select(ap, 1);
2207 if (ap->flags & ATA_FLAG_MMIO) {
2208 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2209 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2210 } else {
2211 nsect = inb(ioaddr->nsect_addr);
2212 lbal = inb(ioaddr->lbal_addr);
2213 }
2214 if ((nsect == 1) && (lbal == 1))
2215 break;
2216 if (time_after(jiffies, timeout)) {
2217 dev1 = 0;
2218 break;
2219 }
2220 msleep(50); /* give drive a breather */
2221 }
2222 if (dev1)
2223 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2224
2225 /* is all this really necessary? */
2226 ap->ops->dev_select(ap, 0);
2227 if (dev1)
2228 ap->ops->dev_select(ap, 1);
2229 if (dev0)
2230 ap->ops->dev_select(ap, 0);
2231 }
2232
2233 static unsigned int ata_bus_softreset(struct ata_port *ap,
2234 unsigned int devmask)
2235 {
2236 struct ata_ioports *ioaddr = &ap->ioaddr;
2237
2238 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2239
2240 /* software reset. causes dev0 to be selected */
2241 if (ap->flags & ATA_FLAG_MMIO) {
2242 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2243 udelay(20); /* FIXME: flush */
2244 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2245 udelay(20); /* FIXME: flush */
2246 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2247 } else {
2248 outb(ap->ctl, ioaddr->ctl_addr);
2249 udelay(10);
2250 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2251 udelay(10);
2252 outb(ap->ctl, ioaddr->ctl_addr);
2253 }
2254
2255 /* spec mandates ">= 2ms" before checking status.
2256 * We wait 150ms, because that was the magic delay used for
2257 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2258 * between when the ATA command register is written, and then
2259 * status is checked. Because waiting for "a while" before
2260 * checking status is fine, post SRST, we perform this magic
2261 * delay here as well.
2262 *
2263 * Old drivers/ide uses the 2 ms rule and then waits for ready.
2264 */
2265 msleep(150);
2266
2267 /* Before we perform post reset processing we want to see if
2268 * the bus shows 0xFF because the odd clown forgets the D7
2269 * pulldown resistor.
2270 */
2271 if (ata_check_status(ap) == 0xFF) {
2272 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2273 return AC_ERR_OTHER;
2274 }
2275
2276 ata_bus_post_reset(ap, devmask);
2277
2278 return 0;
2279 }
2280
2281 /**
2282 * ata_bus_reset - reset host port and associated ATA channel
2283 * @ap: port to reset
2284 *
2285 * This is typically the first time we actually start issuing
2286 * commands to the ATA channel. We wait for BSY to clear, then
2287 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2288 * result. Determine what devices, if any, are on the channel
2289 * by looking at the device 0/1 error register. Look at the signature
2290 * stored in each device's taskfile registers, to determine if
2291 * the device is ATA or ATAPI.
2292 *
2293 * LOCKING:
2294 * PCI/etc. bus probe sem.
2295 * Obtains host_set lock.
2296 *
2297 * SIDE EFFECTS:
2298 * Sets ATA_FLAG_DISABLED if bus reset fails.
2299 */
2300
2301 void ata_bus_reset(struct ata_port *ap)
2302 {
2303 struct ata_ioports *ioaddr = &ap->ioaddr;
2304 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2305 u8 err;
2306 unsigned int dev0, dev1 = 0, devmask = 0;
2307
2308 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2309
2310 /* determine if device 0/1 are present */
2311 if (ap->flags & ATA_FLAG_SATA_RESET)
2312 dev0 = 1;
2313 else {
2314 dev0 = ata_devchk(ap, 0);
2315 if (slave_possible)
2316 dev1 = ata_devchk(ap, 1);
2317 }
2318
2319 if (dev0)
2320 devmask |= (1 << 0);
2321 if (dev1)
2322 devmask |= (1 << 1);
2323
2324 /* select device 0 again */
2325 ap->ops->dev_select(ap, 0);
2326
2327 /* issue bus reset */
2328 if (ap->flags & ATA_FLAG_SRST)
2329 if (ata_bus_softreset(ap, devmask))
2330 goto err_out;
2331
2332 /*
2333 * determine by signature whether we have ATA or ATAPI devices
2334 */
2335 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2336 if ((slave_possible) && (err != 0x81))
2337 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2338
2339 /* re-enable interrupts */
2340 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2341 ata_irq_on(ap);
2342
2343 /* is double-select really necessary? */
2344 if (ap->device[1].class != ATA_DEV_NONE)
2345 ap->ops->dev_select(ap, 1);
2346 if (ap->device[0].class != ATA_DEV_NONE)
2347 ap->ops->dev_select(ap, 0);
2348
2349 /* if no devices were detected, disable this port */
2350 if ((ap->device[0].class == ATA_DEV_NONE) &&
2351 (ap->device[1].class == ATA_DEV_NONE))
2352 goto err_out;
2353
2354 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2355 /* set up device control for ATA_FLAG_SATA_RESET */
2356 if (ap->flags & ATA_FLAG_MMIO)
2357 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2358 else
2359 outb(ap->ctl, ioaddr->ctl_addr);
2360 }
2361
2362 DPRINTK("EXIT\n");
2363 return;
2364
2365 err_out:
2366 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2367 ap->ops->port_disable(ap);
2368
2369 DPRINTK("EXIT\n");
2370 }
2371
2372 static int sata_phy_resume(struct ata_port *ap)
2373 {
2374 unsigned long timeout = jiffies + (HZ * 5);
2375 u32 scontrol, sstatus;
2376 int rc;
2377
2378 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2379 return rc;
2380
2381 scontrol = (scontrol & 0x0f0) | 0x300;
2382
2383 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2384 return rc;
2385
2386 /* Wait for phy to become ready, if necessary. */
2387 do {
2388 msleep(200);
2389 if ((rc = sata_scr_read(ap, SCR_STATUS, &sstatus)))
2390 return rc;
2391 if ((sstatus & 0xf) != 1)
2392 return 0;
2393 } while (time_before(jiffies, timeout));
2394
2395 return -EBUSY;
2396 }
2397
2398 /**
2399 * ata_std_probeinit - initialize probing
2400 * @ap: port to be probed
2401 *
2402 * @ap is about to be probed. Initialize it. This function is
2403 * to be used as standard callback for ata_drive_probe_reset().
2404 *
2405 * NOTE!!! Do not use this function as probeinit if a low level
2406 * driver implements only hardreset. Just pass NULL as probeinit
2407 * in that case. Using this function is probably okay but doing
2408 * so makes the reset sequence different from the original
2409 * ->phy_reset implementation and Jeff nervous. :-P
2410 */
2411 void ata_std_probeinit(struct ata_port *ap)
2412 {
2413 u32 scontrol;
2414
2415 /* resume link */
2416 sata_phy_resume(ap);
2417
2418 /* init sata_spd_limit to the current value */
2419 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
2420 int spd = (scontrol >> 4) & 0xf;
2421 ap->sata_spd_limit &= (1 << spd) - 1;
2422 }
2423
2424 /* wait for device */
2425 if (ata_port_online(ap))
2426 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2427 }
2428
2429 /**
2430 * ata_std_softreset - reset host port via ATA SRST
2431 * @ap: port to reset
2432 * @classes: resulting classes of attached devices
2433 *
2434 * Reset host port using ATA SRST. This function is to be used
2435 * as standard callback for ata_drive_*_reset() functions.
2436 *
2437 * LOCKING:
2438 * Kernel thread context (may sleep)
2439 *
2440 * RETURNS:
2441 * 0 on success, -errno otherwise.
2442 */
2443 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2444 {
2445 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2446 unsigned int devmask = 0, err_mask;
2447 u8 err;
2448
2449 DPRINTK("ENTER\n");
2450
2451 if (ata_port_offline(ap)) {
2452 classes[0] = ATA_DEV_NONE;
2453 goto out;
2454 }
2455
2456 /* determine if device 0/1 are present */
2457 if (ata_devchk(ap, 0))
2458 devmask |= (1 << 0);
2459 if (slave_possible && ata_devchk(ap, 1))
2460 devmask |= (1 << 1);
2461
2462 /* select device 0 again */
2463 ap->ops->dev_select(ap, 0);
2464
2465 /* issue bus reset */
2466 DPRINTK("about to softreset, devmask=%x\n", devmask);
2467 err_mask = ata_bus_softreset(ap, devmask);
2468 if (err_mask) {
2469 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2470 err_mask);
2471 return -EIO;
2472 }
2473
2474 /* determine by signature whether we have ATA or ATAPI devices */
2475 classes[0] = ata_dev_try_classify(ap, 0, &err);
2476 if (slave_possible && err != 0x81)
2477 classes[1] = ata_dev_try_classify(ap, 1, &err);
2478
2479 out:
2480 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2481 return 0;
2482 }
2483
2484 /**
2485 * sata_std_hardreset - reset host port via SATA phy reset
2486 * @ap: port to reset
2487 * @class: resulting class of attached device
2488 *
2489 * SATA phy-reset host port using DET bits of SControl register.
2490 * This function is to be used as standard callback for
2491 * ata_drive_*_reset().
2492 *
2493 * LOCKING:
2494 * Kernel thread context (may sleep)
2495 *
2496 * RETURNS:
2497 * 0 on success, -errno otherwise.
2498 */
2499 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2500 {
2501 u32 scontrol;
2502 int rc;
2503
2504 DPRINTK("ENTER\n");
2505
2506 if (sata_set_spd_needed(ap)) {
2507 /* SATA spec says nothing about how to reconfigure
2508 * spd. To be on the safe side, turn off phy during
2509 * reconfiguration. This works for at least ICH7 AHCI
2510 * and Sil3124.
2511 */
2512 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2513 return rc;
2514
2515 scontrol = (scontrol & 0x0f0) | 0x302;
2516
2517 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2518 return rc;
2519
2520 sata_set_spd(ap);
2521 }
2522
2523 /* issue phy wake/reset */
2524 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2525 return rc;
2526
2527 scontrol = (scontrol & 0x0f0) | 0x301;
2528
2529 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2530 return rc;
2531
2532 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2533 * 10.4.2 says at least 1 ms.
2534 */
2535 msleep(1);
2536
2537 /* bring phy back */
2538 sata_phy_resume(ap);
2539
2540 /* TODO: phy layer with polling, timeouts, etc. */
2541 if (ata_port_offline(ap)) {
2542 *class = ATA_DEV_NONE;
2543 DPRINTK("EXIT, link offline\n");
2544 return 0;
2545 }
2546
2547 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2548 ata_port_printk(ap, KERN_ERR,
2549 "COMRESET failed (device not ready)\n");
2550 return -EIO;
2551 }
2552
2553 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2554
2555 *class = ata_dev_try_classify(ap, 0, NULL);
2556
2557 DPRINTK("EXIT, class=%u\n", *class);
2558 return 0;
2559 }
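
/*
 * Editor's note -- the SControl values written in sata_phy_resume() and
 * sata_std_hardreset() above decode as follows (DET is bits 3:0, SPD is
 * bits 7:4 and is preserved by the "& 0x0f0", IPM is bits 11:8):
 *
 *	0x300	DET = 0: no reset action; IPM = 3: disallow partial/slumber
 *	0x301	DET = 1: drive COMRESET onto the link (phy wake/reset)
 *	0x302	used above to hold the link down while the speed limit in
 *		the SPD field is being reprogrammed
 *
 * This is an explanatory sketch of the register layout, not driver code.
 */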
2560
2561 /**
2562 * ata_std_postreset - standard postreset callback
2563 * @ap: the target ata_port
2564 * @classes: classes of attached devices
2565 *
2566 * This function is invoked after a successful reset. Note that
2567 * the device might have been reset more than once using
2568 * different reset methods before postreset is invoked.
2569 *
2570 * This function is to be used as standard callback for
2571 * ata_drive_*_reset().
2572 *
2573 * LOCKING:
2574 * Kernel thread context (may sleep)
2575 */
2576 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2577 {
2578 u32 serror;
2579
2580 DPRINTK("ENTER\n");
2581
2582 /* print link status */
2583 sata_print_link_status(ap);
2584
2585 /* clear SError */
2586 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2587 sata_scr_write(ap, SCR_ERROR, serror);
2588
2589 /* re-enable interrupts */
2590 if (!ap->ops->error_handler) {
2591 /* FIXME: hack. create a hook instead */
2592 if (ap->ioaddr.ctl_addr)
2593 ata_irq_on(ap);
2594 }
2595
2596 /* is double-select really necessary? */
2597 if (classes[0] != ATA_DEV_NONE)
2598 ap->ops->dev_select(ap, 1);
2599 if (classes[1] != ATA_DEV_NONE)
2600 ap->ops->dev_select(ap, 0);
2601
2602 /* bail out if no device is present */
2603 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2604 DPRINTK("EXIT, no device\n");
2605 return;
2606 }
2607
2608 /* set up device control */
2609 if (ap->ioaddr.ctl_addr) {
2610 if (ap->flags & ATA_FLAG_MMIO)
2611 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2612 else
2613 outb(ap->ctl, ap->ioaddr.ctl_addr);
2614 }
2615
2616 DPRINTK("EXIT\n");
2617 }
2618
2619 /**
2620 * ata_std_probe_reset - standard probe reset method
2621 * @ap: port on which to perform probe-reset
2622 * @classes: resulting classes of attached devices
2623 *
2624 * The stock off-the-shelf ->probe_reset method.
2625 *
2626 * LOCKING:
2627 * Kernel thread context (may sleep)
2628 *
2629 * RETURNS:
2630 * 0 on success, -errno otherwise.
2631 */
2632 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2633 {
2634 ata_reset_fn_t hardreset;
2635
2636 hardreset = NULL;
2637 if (sata_scr_valid(ap))
2638 hardreset = sata_std_hardreset;
2639
2640 return ata_drive_probe_reset(ap, ata_std_probeinit,
2641 ata_std_softreset, hardreset,
2642 ata_std_postreset, classes);
2643 }
2644
2645 int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2646 unsigned int *classes)
2647 {
2648 int i, rc;
2649
2650 for (i = 0; i < ATA_MAX_DEVICES; i++)
2651 classes[i] = ATA_DEV_UNKNOWN;
2652
2653 rc = reset(ap, classes);
2654 if (rc)
2655 return rc;
2656
2657 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2658 * is complete and convert all ATA_DEV_UNKNOWN to
2659 * ATA_DEV_NONE.
2660 */
2661 for (i = 0; i < ATA_MAX_DEVICES; i++)
2662 if (classes[i] != ATA_DEV_UNKNOWN)
2663 break;
2664
2665 if (i < ATA_MAX_DEVICES)
2666 for (i = 0; i < ATA_MAX_DEVICES; i++)
2667 if (classes[i] == ATA_DEV_UNKNOWN)
2668 classes[i] = ATA_DEV_NONE;
2669
2670 return 0;
2671 }
2672
2673 /**
2674 * ata_drive_probe_reset - Perform probe reset with given methods
2675 * @ap: port to reset
2676 * @probeinit: probeinit method (can be NULL)
2677 * @softreset: softreset method (can be NULL)
2678 * @hardreset: hardreset method (can be NULL)
2679 * @postreset: postreset method (can be NULL)
2680 * @classes: resulting classes of attached devices
2681 *
2682 * Reset the specified port and classify attached devices using
2683 * given methods. This function prefers softreset but tries all
2684 * possible reset sequences to reset and classify devices. This
2685 * function is intended to be used for constructing ->probe_reset
2686 * callback by low level drivers.
2687 *
2688 * Reset methods should follow these rules:
2689 *
2690 * - Return 0 on success, -errno on failure.
2691 * - If classification is supported, fill classes[] with
2692 * recognized class codes.
2693 * - If classification is not supported, leave classes[] alone.
2694 *
2695 * LOCKING:
2696 * Kernel thread context (may sleep)
2697 *
2698 * RETURNS:
2699 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2700 * if classification fails, and any error code from reset
2701 * methods.
2702 */
2703 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2704 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2705 ata_postreset_fn_t postreset, unsigned int *classes)
2706 {
2707 int rc = -EINVAL;
2708
2709 ata_eh_freeze_port(ap);
2710
2711 if (probeinit)
2712 probeinit(ap);
2713
2714 if (softreset && !sata_set_spd_needed(ap)) {
2715 rc = ata_do_reset(ap, softreset, classes);
2716 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2717 goto done;
2718 ata_port_printk(ap, KERN_INFO, "softreset failed, "
2719 "will try hardreset in 5 secs\n");
2720 ssleep(5);
2721 }
2722
2723 if (!hardreset)
2724 goto done;
2725
2726 while (1) {
2727 rc = ata_do_reset(ap, hardreset, classes);
2728 if (rc == 0) {
2729 if (classes[0] != ATA_DEV_UNKNOWN)
2730 goto done;
2731 break;
2732 }
2733
2734 if (sata_down_spd_limit(ap))
2735 goto done;
2736
2737 ata_port_printk(ap, KERN_INFO, "hardreset failed, "
2738 "will retry in 5 secs\n");
2739 ssleep(5);
2740 }
2741
2742 if (softreset) {
2743 ata_port_printk(ap, KERN_INFO,
2744 "hardreset succeeded without classification, "
2745 "will retry softreset in 5 secs\n");
2746 ssleep(5);
2747
2748 rc = ata_do_reset(ap, softreset, classes);
2749 }
2750
2751 done:
2752 if (rc == 0) {
2753 if (postreset)
2754 postreset(ap, classes);
2755
2756 ata_eh_thaw_port(ap);
2757
2758 if (classes[0] == ATA_DEV_UNKNOWN)
2759 rc = -ENODEV;
2760 }
2761 return rc;
2762 }
2763
2764 /**
2765 * ata_dev_same_device - Determine whether new ID matches configured device
2766 * @dev: device to compare against
2767 * @new_class: class of the new device
2768 * @new_id: IDENTIFY page of the new device
2769 *
2770 * Compare @new_class and @new_id against @dev and determine
2771 * whether @dev is the device indicated by @new_class and
2772 * @new_id.
2773 *
2774 * LOCKING:
2775 * None.
2776 *
2777 * RETURNS:
2778 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2779 */
2780 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2781 const u16 *new_id)
2782 {
2783 const u16 *old_id = dev->id;
2784 unsigned char model[2][41], serial[2][21];
2785 u64 new_n_sectors;
2786
2787 if (dev->class != new_class) {
2788 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2789 dev->class, new_class);
2790 return 0;
2791 }
2792
2793 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2794 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2795 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2796 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2797 new_n_sectors = ata_id_n_sectors(new_id);
2798
2799 if (strcmp(model[0], model[1])) {
2800 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2801 "'%s' != '%s'\n", model[0], model[1]);
2802 return 0;
2803 }
2804
2805 if (strcmp(serial[0], serial[1])) {
2806 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2807 "'%s' != '%s'\n", serial[0], serial[1]);
2808 return 0;
2809 }
2810
2811 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2812 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2813 "%llu != %llu\n",
2814 (unsigned long long)dev->n_sectors,
2815 (unsigned long long)new_n_sectors);
2816 return 0;
2817 }
2818
2819 return 1;
2820 }
2821
2822 /**
2823 * ata_dev_revalidate - Revalidate ATA device
2824 * @dev: device to revalidate
2825 * @post_reset: is this revalidation after reset?
2826 *
2827 * Re-read IDENTIFY page and make sure @dev is still attached to
2828 * the port.
2829 *
2830 * LOCKING:
2831 * Kernel thread context (may sleep)
2832 *
2833 * RETURNS:
2834 * 0 on success, negative errno otherwise
2835 */
2836 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2837 {
2838 unsigned int class = dev->class;
2839 u16 *id = (void *)dev->ap->sector_buf;
2840 int rc;
2841
2842 if (!ata_dev_enabled(dev)) {
2843 rc = -ENODEV;
2844 goto fail;
2845 }
2846
2847 /* read ID data */
2848 rc = ata_dev_read_id(dev, &class, post_reset, id);
2849 if (rc)
2850 goto fail;
2851
2852 /* is the device still there? */
2853 if (!ata_dev_same_device(dev, class, id)) {
2854 rc = -ENODEV;
2855 goto fail;
2856 }
2857
2858 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2859
2860 /* configure device according to the new ID */
2861 rc = ata_dev_configure(dev, 0);
2862 if (rc == 0)
2863 return 0;
2864
2865 fail:
2866 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2867 return rc;
2868 }
2869
2870 static const char * const ata_dma_blacklist [] = {
2871 "WDC AC11000H", NULL,
2872 "WDC AC22100H", NULL,
2873 "WDC AC32500H", NULL,
2874 "WDC AC33100H", NULL,
2875 "WDC AC31600H", NULL,
2876 "WDC AC32100H", "24.09P07",
2877 "WDC AC23200L", "21.10N21",
2878 "Compaq CRD-8241B", NULL,
2879 "CRD-8400B", NULL,
2880 "CRD-8480B", NULL,
2881 "CRD-8482B", NULL,
2882 "CRD-84", NULL,
2883 "SanDisk SDP3B", NULL,
2884 "SanDisk SDP3B-64", NULL,
2885 "SANYO CD-ROM CRD", NULL,
2886 "HITACHI CDR-8", NULL,
2887 "HITACHI CDR-8335", NULL,
2888 "HITACHI CDR-8435", NULL,
2889 "Toshiba CD-ROM XM-6202B", NULL,
2890 "TOSHIBA CD-ROM XM-1702BC", NULL,
2891 "CD-532E-A", NULL,
2892 "E-IDE CD-ROM CR-840", NULL,
2893 "CD-ROM Drive/F5A", NULL,
2894 "WPI CDD-820", NULL,
2895 "SAMSUNG CD-ROM SC-148C", NULL,
2896 "SAMSUNG CD-ROM SC", NULL,
2897 "SanDisk SDP3B-64", NULL,
2898 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2899 "_NEC DV5800A", NULL,
2900 "SAMSUNG CD-ROM SN-124", "N001"
2901 };
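
/*
 * Editor's note: the table above is laid out as { model, firmware rev }
 * pairs; a NULL revision blacklists every firmware revision of that
 * model, otherwise only the named revision is affected.  Hypothetical
 * new entries would look like
 *
 *	"SOME DRIVE MODEL",	"1.23",		(rev 1.23 only)
 *	"ANOTHER MODEL",	NULL,		(all revisions)
 */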
2902
2903 static int ata_strim(char *s, size_t len)
2904 {
2905 len = strnlen(s, len);
2906
2907 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2908 while ((len > 0) && (s[len - 1] == ' ')) {
2909 len--;
2910 s[len] = 0;
2911 }
2912 return len;
2913 }
2914
2915 static int ata_dma_blacklisted(const struct ata_device *dev)
2916 {
2917 unsigned char model_num[40];
2918 unsigned char model_rev[16];
2919 unsigned int nlen, rlen;
2920 int i;
2921
2922 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2923 sizeof(model_num));
2924 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2925 sizeof(model_rev));
2926 nlen = ata_strim(model_num, sizeof(model_num));
2927 rlen = ata_strim(model_rev, sizeof(model_rev));
2928
2929 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2930 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2931 if (ata_dma_blacklist[i+1] == NULL)
2932 return 1;
2933 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2934 return 1;
2935 }
2936 }
2937 return 0;
2938 }
2939
2940 /**
2941 * ata_dev_xfermask - Compute supported xfermask of the given device
2942 * @dev: Device to compute xfermask for
2943 *
2944 * Compute supported xfermask of @dev and store it in
2945 * dev->*_mask. This function is responsible for applying all
2946 * known limits including host controller limits, device
2947 * blacklist, etc...
2948 *
2949 * FIXME: The current implementation limits all transfer modes to
2950 * the fastest mode supported by the slowest device on the port. This is not
2951 * required on most controllers.
2952 *
2953 * LOCKING:
2954 * None.
2955 */
2956 static void ata_dev_xfermask(struct ata_device *dev)
2957 {
2958 struct ata_port *ap = dev->ap;
2959 struct ata_host_set *hs = ap->host_set;
2960 unsigned long xfer_mask;
2961 int i;
2962
2963 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2964 ap->mwdma_mask, ap->udma_mask);
2965
2966 /* Apply cable rule here. Don't apply it early because when
2967 * we handle hot plug the cable type can itself change.
2968 */
2969 if (ap->cbl == ATA_CBL_PATA40)
2970 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2971
2972 /* FIXME: Use port-wide xfermask for now */
2973 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2974 struct ata_device *d = &ap->device[i];
2975
2976 if (ata_dev_absent(d))
2977 continue;
2978
2979 if (ata_dev_disabled(d)) {
2980 /* to avoid violating device selection timing */
2981 xfer_mask &= ata_pack_xfermask(d->pio_mask,
2982 UINT_MAX, UINT_MAX);
2983 continue;
2984 }
2985
2986 xfer_mask &= ata_pack_xfermask(d->pio_mask,
2987 d->mwdma_mask, d->udma_mask);
2988 xfer_mask &= ata_id_xfermask(d->id);
2989 if (ata_dma_blacklisted(d))
2990 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2991 }
2992
2993 if (ata_dma_blacklisted(dev))
2994 ata_dev_printk(dev, KERN_WARNING,
2995 "device is on DMA blacklist, disabling DMA\n");
2996
2997 if (hs->flags & ATA_HOST_SIMPLEX) {
2998 if (hs->simplex_claimed)
2999 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3000 }
3001
3002 if (ap->ops->mode_filter)
3003 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3004
3005 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3006 &dev->mwdma_mask, &dev->udma_mask);
3007 }
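
/*
 * Editor's note -- what the cable rule above actually masks off.  The
 * UDMA field is one bit per mode, so
 *
 *	0xF8 << ATA_SHIFT_UDMA   ==   the UDMA3..UDMA7 bits
 *
 * and clearing them limits a 40-wire PATA cable to UDMA/33 (UDMA2) and
 * below; the faster UDMA modes require the 80-conductor cable with the
 * extra ground lines.  Illustrative only.
 */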
3008
3009 /**
3010 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3011 * @dev: Device to which command will be sent
3012 *
3013 * Issue SET FEATURES - XFER MODE command to device @dev
3014 * on its port.
3015 *
3016 * LOCKING:
3017 * PCI/etc. bus probe sem.
3018 *
3019 * RETURNS:
3020 * 0 on success, AC_ERR_* mask otherwise.
3021 */
3022
3023 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3024 {
3025 struct ata_taskfile tf;
3026 unsigned int err_mask;
3027
3028 /* set up set-features taskfile */
3029 DPRINTK("set features - xfer mode\n");
3030
3031 ata_tf_init(dev, &tf);
3032 tf.command = ATA_CMD_SET_FEATURES;
3033 tf.feature = SETFEATURES_XFER;
3034 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3035 tf.protocol = ATA_PROT_NODATA;
3036 tf.nsect = dev->xfer_mode;
3037
3038 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3039
3040 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3041 return err_mask;
3042 }
3043
3044 /**
3045 * ata_dev_init_params - Issue INIT DEV PARAMS command
3046 * @dev: Device to which command will be sent
3047 * @heads: Number of heads
3048 * @sectors: Number of sectors
3049 *
3050 * LOCKING:
3051 * Kernel thread context (may sleep)
3052 *
3053 * RETURNS:
3054 * 0 on success, AC_ERR_* mask otherwise.
3055 */
3056 static unsigned int ata_dev_init_params(struct ata_device *dev,
3057 u16 heads, u16 sectors)
3058 {
3059 struct ata_taskfile tf;
3060 unsigned int err_mask;
3061
3062 /* Number of sectors per track 1-255. Number of heads 1-16 */
3063 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3064 return AC_ERR_INVALID;
3065
3066 /* set up init dev params taskfile */
3067 DPRINTK("init dev params \n");
3068
3069 ata_tf_init(dev, &tf);
3070 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3071 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3072 tf.protocol = ATA_PROT_NODATA;
3073 tf.nsect = sectors;
3074 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3075
3076 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3077
3078 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3079 return err_mask;
3080 }
3081
3082 /**
3083 * ata_sg_clean - Unmap DMA memory associated with command
3084 * @qc: Command containing DMA memory to be released
3085 *
3086 * Unmap all mapped DMA memory associated with this command.
3087 *
3088 * LOCKING:
3089 * spin_lock_irqsave(host_set lock)
3090 */
3091
3092 static void ata_sg_clean(struct ata_queued_cmd *qc)
3093 {
3094 struct ata_port *ap = qc->ap;
3095 struct scatterlist *sg = qc->__sg;
3096 int dir = qc->dma_dir;
3097 void *pad_buf = NULL;
3098
3099 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3100 WARN_ON(sg == NULL);
3101
3102 if (qc->flags & ATA_QCFLAG_SINGLE)
3103 WARN_ON(qc->n_elem > 1);
3104
3105 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3106
3107 /* if we padded the buffer out to 32-bit bound, and data
3108 * xfer direction is from-device, we must copy from the
3109 * pad buffer back into the supplied buffer
3110 */
3111 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3112 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3113
3114 if (qc->flags & ATA_QCFLAG_SG) {
3115 if (qc->n_elem)
3116 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3117 /* restore last sg */
3118 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3119 if (pad_buf) {
3120 struct scatterlist *psg = &qc->pad_sgent;
3121 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3122 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3123 kunmap_atomic(addr, KM_IRQ0);
3124 }
3125 } else {
3126 if (qc->n_elem)
3127 dma_unmap_single(ap->dev,
3128 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3129 dir);
3130 /* restore sg */
3131 sg->length += qc->pad_len;
3132 if (pad_buf)
3133 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3134 pad_buf, qc->pad_len);
3135 }
3136
3137 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3138 qc->__sg = NULL;
3139 }
3140
3141 /**
3142 * ata_fill_sg - Fill PCI IDE PRD table
3143 * @qc: Metadata associated with taskfile to be transferred
3144 *
3145 * Fill PCI IDE PRD (scatter-gather) table with segments
3146 * associated with the current disk command.
3147 *
3148 * LOCKING:
3149 * spin_lock_irqsave(host_set lock)
3150 *
3151 */
3152 static void ata_fill_sg(struct ata_queued_cmd *qc)
3153 {
3154 struct ata_port *ap = qc->ap;
3155 struct scatterlist *sg;
3156 unsigned int idx;
3157
3158 WARN_ON(qc->__sg == NULL);
3159 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3160
3161 idx = 0;
3162 ata_for_each_sg(sg, qc) {
3163 u32 addr, offset;
3164 u32 sg_len, len;
3165
3166 /* determine if physical DMA addr spans 64K boundary.
3167 * Note h/w doesn't support 64-bit, so we unconditionally
3168 * truncate dma_addr_t to u32.
3169 */
3170 addr = (u32) sg_dma_address(sg);
3171 sg_len = sg_dma_len(sg);
3172
3173 while (sg_len) {
3174 offset = addr & 0xffff;
3175 len = sg_len;
3176 if ((offset + sg_len) > 0x10000)
3177 len = 0x10000 - offset;
3178
3179 ap->prd[idx].addr = cpu_to_le32(addr);
3180 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3181 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3182
3183 idx++;
3184 sg_len -= len;
3185 addr += len;
3186 }
3187 }
3188
3189 if (idx)
3190 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3191 }
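/*
 * Editor's note -- a worked example of the 64K-boundary split above,
 * using hypothetical addresses.  An S/G segment of 0x9000 bytes at DMA
 * address 0x1fc000 has offset 0xc000 within its 64K page, so the loop
 * emits two PRD entries:
 *
 *	PRD[n]   = (0x1fc000, 0x4000)	up to the 0x200000 boundary
 *	PRD[n+1] = (0x200000, 0x5000)	the remainder
 *
 * In the PCI IDE PRD format a length field of 0 encodes 64K, which is
 * why only the low 16 bits of len are stored; the final entry gets
 * ATA_PRD_EOT set by the code above.
 */
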
3192 /**
3193 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3194 * @qc: Metadata associated with taskfile to check
3195 *
3196 * Allow low-level driver to filter ATA PACKET commands, returning
3197 * a status indicating whether or not it is OK to use DMA for the
3198 * supplied PACKET command.
3199 *
3200 * LOCKING:
3201 * spin_lock_irqsave(host_set lock)
3202 *
3203 * RETURNS: 0 when ATAPI DMA can be used
3204 * nonzero otherwise
3205 */
3206 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3207 {
3208 struct ata_port *ap = qc->ap;
3209 int rc = 0; /* Assume ATAPI DMA is OK by default */
3210
3211 if (ap->ops->check_atapi_dma)
3212 rc = ap->ops->check_atapi_dma(qc);
3213
3214 return rc;
3215 }
3216 /**
3217 * ata_qc_prep - Prepare taskfile for submission
3218 * @qc: Metadata associated with taskfile to be prepared
3219 *
3220 * Prepare ATA taskfile for submission.
3221 *
3222 * LOCKING:
3223 * spin_lock_irqsave(host_set lock)
3224 */
3225 void ata_qc_prep(struct ata_queued_cmd *qc)
3226 {
3227 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3228 return;
3229
3230 ata_fill_sg(qc);
3231 }
3232
3233 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3234
3235 /**
3236 * ata_sg_init_one - Associate command with memory buffer
3237 * @qc: Command to be associated
3238 * @buf: Memory buffer
3239 * @buflen: Length of memory buffer, in bytes.
3240 *
3241 * Initialize the data-related elements of queued_cmd @qc
3242 * to point to a single memory buffer, @buf of byte length @buflen.
3243 *
3244 * LOCKING:
3245 * spin_lock_irqsave(host_set lock)
3246 */
3247
3248 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3249 {
3250 struct scatterlist *sg;
3251
3252 qc->flags |= ATA_QCFLAG_SINGLE;
3253
3254 memset(&qc->sgent, 0, sizeof(qc->sgent));
3255 qc->__sg = &qc->sgent;
3256 qc->n_elem = 1;
3257 qc->orig_n_elem = 1;
3258 qc->buf_virt = buf;
3259
3260 sg = qc->__sg;
3261 sg_init_one(sg, buf, buflen);
3262 }
3263
3264 /**
3265 * ata_sg_init - Associate command with scatter-gather table.
3266 * @qc: Command to be associated
3267 * @sg: Scatter-gather table.
3268 * @n_elem: Number of elements in s/g table.
3269 *
3270 * Initialize the data-related elements of queued_cmd @qc
3271 * to point to a scatter-gather table @sg, containing @n_elem
3272 * elements.
3273 *
3274 * LOCKING:
3275 * spin_lock_irqsave(host_set lock)
3276 */
3277
3278 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3279 unsigned int n_elem)
3280 {
3281 qc->flags |= ATA_QCFLAG_SG;
3282 qc->__sg = sg;
3283 qc->n_elem = n_elem;
3284 qc->orig_n_elem = n_elem;
3285 }
3286
3287 /**
3288 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3289 * @qc: Command with memory buffer to be mapped.
3290 *
3291 * DMA-map the memory buffer associated with queued_cmd @qc.
3292 *
3293 * LOCKING:
3294 * spin_lock_irqsave(host_set lock)
3295 *
3296 * RETURNS:
3297 * Zero on success, negative on error.
3298 */
3299
3300 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3301 {
3302 struct ata_port *ap = qc->ap;
3303 int dir = qc->dma_dir;
3304 struct scatterlist *sg = qc->__sg;
3305 dma_addr_t dma_address;
3306 int trim_sg = 0;
3307
3308 /* we must lengthen transfers to end on a 32-bit boundary */
3309 qc->pad_len = sg->length & 3;
3310 if (qc->pad_len) {
3311 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3312 struct scatterlist *psg = &qc->pad_sgent;
3313
3314 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3315
3316 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3317
3318 if (qc->tf.flags & ATA_TFLAG_WRITE)
3319 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3320 qc->pad_len);
3321
3322 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3323 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3324 /* trim sg */
3325 sg->length -= qc->pad_len;
3326 if (sg->length == 0)
3327 trim_sg = 1;
3328
3329 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3330 sg->length, qc->pad_len);
3331 }
3332
3333 if (trim_sg) {
3334 qc->n_elem--;
3335 goto skip_map;
3336 }
3337
3338 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3339 sg->length, dir);
3340 if (dma_mapping_error(dma_address)) {
3341 /* restore sg */
3342 sg->length += qc->pad_len;
3343 return -1;
3344 }
3345
3346 sg_dma_address(sg) = dma_address;
3347 sg_dma_len(sg) = sg->length;
3348
3349 skip_map:
3350 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3351 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3352
3353 return 0;
3354 }
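
/*
 * Editor's note -- a worked example of the padding above.  An ATAPI
 * transfer of 510 bytes has pad_len = 510 & 3 = 2: the buffer is trimmed
 * to 508 bytes for the normal DMA mapping and the 4-byte pad buffer is
 * DMA'd after it, rounding the transfer up to a 32-bit multiple.  For a
 * write the 2 real tail bytes are copied into the (zeroed) pad buffer
 * here; for a read they are copied back out in ata_sg_clean().
 */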
3355
3356 /**
3357 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3358 * @qc: Command with scatter-gather table to be mapped.
3359 *
3360 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3361 *
3362 * LOCKING:
3363 * spin_lock_irqsave(host_set lock)
3364 *
3365 * RETURNS:
3366 * Zero on success, negative on error.
3367 *
3368 */
3369
3370 static int ata_sg_setup(struct ata_queued_cmd *qc)
3371 {
3372 struct ata_port *ap = qc->ap;
3373 struct scatterlist *sg = qc->__sg;
3374 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3375 int n_elem, pre_n_elem, dir, trim_sg = 0;
3376
3377 VPRINTK("ENTER, ata%u\n", ap->id);
3378 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3379
3380 /* we must lengthen transfers to end on a 32-bit boundary */
3381 qc->pad_len = lsg->length & 3;
3382 if (qc->pad_len) {
3383 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3384 struct scatterlist *psg = &qc->pad_sgent;
3385 unsigned int offset;
3386
3387 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3388
3389 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3390
3391 /*
3392 * psg->page/offset are used to copy to-be-written
3393 * data in this function or read data in ata_sg_clean.
3394 */
3395 offset = lsg->offset + lsg->length - qc->pad_len;
3396 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3397 psg->offset = offset_in_page(offset);
3398
3399 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3400 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3401 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3402 kunmap_atomic(addr, KM_IRQ0);
3403 }
3404
3405 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3406 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3407 /* trim last sg */
3408 lsg->length -= qc->pad_len;
3409 if (lsg->length == 0)
3410 trim_sg = 1;
3411
3412 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3413 qc->n_elem - 1, lsg->length, qc->pad_len);
3414 }
3415
3416 pre_n_elem = qc->n_elem;
3417 if (trim_sg && pre_n_elem)
3418 pre_n_elem--;
3419
3420 if (!pre_n_elem) {
3421 n_elem = 0;
3422 goto skip_map;
3423 }
3424
3425 dir = qc->dma_dir;
3426 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3427 if (n_elem < 1) {
3428 /* restore last sg */
3429 lsg->length += qc->pad_len;
3430 return -1;
3431 }
3432
3433 DPRINTK("%d sg elements mapped\n", n_elem);
3434
3435 skip_map:
3436 qc->n_elem = n_elem;
3437
3438 return 0;
3439 }
3440
3441 /**
3442 * ata_poll_qc_complete - turn irq back on and finish qc
3443 * @qc: Command to complete
3445 *
3446 * LOCKING:
3447 * None. (grabs host lock)
3448 */
3449 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3450 {
3451 struct ata_port *ap = qc->ap;
3452 unsigned long flags;
3453
3454 spin_lock_irqsave(&ap->host_set->lock, flags);
3455
3456 if (ap->ops->error_handler) {
3457 /* EH might have kicked in while host_set lock is released */
3458 qc = ata_qc_from_tag(ap, qc->tag);
3459 if (qc) {
3460 if (!(qc->err_mask & AC_ERR_HSM)) {
3461 ap->flags &= ~ATA_FLAG_NOINTR;
3462 ata_irq_on(ap);
3463 ata_qc_complete(qc);
3464 } else
3465 ata_port_freeze(ap);
3466 }
3467 } else {
3468 /* old EH */
3469 ap->flags &= ~ATA_FLAG_NOINTR;
3470 ata_irq_on(ap);
3471 ata_qc_complete(qc);
3472 }
3473
3474 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3475 }
3476
3477 /**
3478 * ata_pio_poll - poll using PIO, depending on current state
3479 * @qc: qc in progress
3480 *
3481 * LOCKING:
3482 * None. (executing in kernel thread context)
3483 *
3484 * RETURNS:
3485 * timeout value to use
3486 */
3487 static unsigned long ata_pio_poll(struct ata_queued_cmd *qc)
3488 {
3489 struct ata_port *ap = qc->ap;
3490 u8 status;
3491 unsigned int poll_state = HSM_ST_UNKNOWN;
3492 unsigned int reg_state = HSM_ST_UNKNOWN;
3493
3494 switch (ap->hsm_task_state) {
3495 case HSM_ST:
3496 case HSM_ST_POLL:
3497 poll_state = HSM_ST_POLL;
3498 reg_state = HSM_ST;
3499 break;
3500 case HSM_ST_LAST:
3501 case HSM_ST_LAST_POLL:
3502 poll_state = HSM_ST_LAST_POLL;
3503 reg_state = HSM_ST_LAST;
3504 break;
3505 default:
3506 BUG();
3507 break;
3508 }
3509
3510 status = ata_chk_status(ap);
3511 if (status & ATA_BUSY) {
3512 if (time_after(jiffies, ap->pio_task_timeout)) {
3513 qc->err_mask |= AC_ERR_TIMEOUT;
3514 ap->hsm_task_state = HSM_ST_TMOUT;
3515 return 0;
3516 }
3517 ap->hsm_task_state = poll_state;
3518 return ATA_SHORT_PAUSE;
3519 }
3520
3521 ap->hsm_task_state = reg_state;
3522 return 0;
3523 }
3524
3525 /**
3526 * ata_pio_complete - check if drive is busy or idle
3527 * @qc: qc to complete
3528 *
3529 * LOCKING:
3530 * None. (executing in kernel thread context)
3531 *
3532 * RETURNS:
3533 * Non-zero if qc completed, zero otherwise.
3534 */
3535 static int ata_pio_complete(struct ata_queued_cmd *qc)
3536 {
3537 struct ata_port *ap = qc->ap;
3538 u8 drv_stat;
3539
3540 /*
3541 * This is purely heuristic. This is a fast path. Sometimes when
3542 * we enter, BSY will be cleared in a chk-status or two. If not,
3543 * the drive is probably seeking or something. Snooze for a couple
3544 * msecs, then chk-status again. If still busy, fall back to
3545 * HSM_ST_POLL state.
3546 */
3547 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3548 if (drv_stat & ATA_BUSY) {
3549 msleep(2);
3550 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3551 if (drv_stat & ATA_BUSY) {
3552 ap->hsm_task_state = HSM_ST_LAST_POLL;
3553 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3554 return 0;
3555 }
3556 }
3557
3558 drv_stat = ata_wait_idle(ap);
3559 if (!ata_ok(drv_stat)) {
3560 qc->err_mask |= __ac_err_mask(drv_stat);
3561 ap->hsm_task_state = HSM_ST_ERR;
3562 return 0;
3563 }
3564
3565 ap->hsm_task_state = HSM_ST_IDLE;
3566
3567 WARN_ON(qc->err_mask);
3568 ata_poll_qc_complete(qc);
3569
3570 /* another command may start at this point */
3571
3572 return 1;
3573 }
3574
3575
3576 /**
3577 * swap_buf_le16 - swap halves of 16-bit words in place
3578 * @buf: Buffer to swap
3579 * @buf_words: Number of 16-bit words in buffer.
3580 *
3581 * Swap halves of 16-bit words if needed to convert from
3582 * little-endian byte order to native cpu byte order, or
3583 * vice-versa.
3584 *
3585 * LOCKING:
3586 * Inherited from caller.
3587 */
3588 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3589 {
3590 #ifdef __BIG_ENDIAN
3591 unsigned int i;
3592
3593 for (i = 0; i < buf_words; i++)
3594 buf[i] = le16_to_cpu(buf[i]);
3595 #endif /* __BIG_ENDIAN */
3596 }
3597
3598 /**
3599 * ata_mmio_data_xfer - Transfer data by MMIO
3600 * @ap: port to read/write
3601 * @buf: data buffer
3602 * @buflen: buffer length
3603 * @write_data: read/write
3604 *
3605 * Transfer data from/to the device data register by MMIO.
3606 *
3607 * LOCKING:
3608 * Inherited from caller.
3609 */
3610
3611 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3612 unsigned int buflen, int write_data)
3613 {
3614 unsigned int i;
3615 unsigned int words = buflen >> 1;
3616 u16 *buf16 = (u16 *) buf;
3617 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3618
3619 /* Transfer multiple of 2 bytes */
3620 if (write_data) {
3621 for (i = 0; i < words; i++)
3622 writew(le16_to_cpu(buf16[i]), mmio);
3623 } else {
3624 for (i = 0; i < words; i++)
3625 buf16[i] = cpu_to_le16(readw(mmio));
3626 }
3627
3628 /* Transfer trailing 1 byte, if any. */
3629 if (unlikely(buflen & 0x01)) {
3630 u16 align_buf[1] = { 0 };
3631 unsigned char *trailing_buf = buf + buflen - 1;
3632
3633 if (write_data) {
3634 memcpy(align_buf, trailing_buf, 1);
3635 writew(le16_to_cpu(align_buf[0]), mmio);
3636 } else {
3637 align_buf[0] = cpu_to_le16(readw(mmio));
3638 memcpy(trailing_buf, align_buf, 1);
3639 }
3640 }
3641 }
3642
3643 /**
3644 * ata_pio_data_xfer - Transfer data by PIO
3645 * @ap: port to read/write
3646 * @buf: data buffer
3647 * @buflen: buffer length
3648 * @write_data: read/write
3649 *
3650 * Transfer data from/to the device data register by PIO.
3651 *
3652 * LOCKING:
3653 * Inherited from caller.
3654 */
3655
3656 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3657 unsigned int buflen, int write_data)
3658 {
3659 unsigned int words = buflen >> 1;
3660
3661 /* Transfer multiple of 2 bytes */
3662 if (write_data)
3663 outsw(ap->ioaddr.data_addr, buf, words);
3664 else
3665 insw(ap->ioaddr.data_addr, buf, words);
3666
3667 /* Transfer trailing 1 byte, if any. */
3668 if (unlikely(buflen & 0x01)) {
3669 u16 align_buf[1] = { 0 };
3670 unsigned char *trailing_buf = buf + buflen - 1;
3671
3672 if (write_data) {
3673 memcpy(align_buf, trailing_buf, 1);
3674 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3675 } else {
3676 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3677 memcpy(trailing_buf, align_buf, 1);
3678 }
3679 }
3680 }
3681
3682 /**
3683 * ata_data_xfer - Transfer data from/to the data register.
3684 * @ap: port to read/write
3685 * @buf: data buffer
3686 * @buflen: buffer length
3687 * @do_write: read/write
3688 *
3689 * Transfer data from/to the device data register.
3690 *
3691 * LOCKING:
3692 * Inherited from caller.
3693 */
3694
3695 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3696 unsigned int buflen, int do_write)
3697 {
3698 /* Make the crap hardware pay the costs not the good stuff */
3699 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3700 unsigned long flags;
3701 local_irq_save(flags);
3702 if (ap->flags & ATA_FLAG_MMIO)
3703 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3704 else
3705 ata_pio_data_xfer(ap, buf, buflen, do_write);
3706 local_irq_restore(flags);
3707 } else {
3708 if (ap->flags & ATA_FLAG_MMIO)
3709 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3710 else
3711 ata_pio_data_xfer(ap, buf, buflen, do_write);
3712 }
3713 }
3714
3715 /**
3716 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3717 * @qc: Command on going
3718 *
3719 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3720 *
3721 * LOCKING:
3722 * Inherited from caller.
3723 */
3724
3725 static void ata_pio_sector(struct ata_queued_cmd *qc)
3726 {
3727 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3728 struct scatterlist *sg = qc->__sg;
3729 struct ata_port *ap = qc->ap;
3730 struct page *page;
3731 unsigned int offset;
3732 unsigned char *buf;
3733
3734 if (qc->cursect == (qc->nsect - 1))
3735 ap->hsm_task_state = HSM_ST_LAST;
3736
3737 page = sg[qc->cursg].page;
3738 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3739
3740 /* get the current page and offset */
3741 page = nth_page(page, (offset >> PAGE_SHIFT));
3742 offset %= PAGE_SIZE;
3743
3744 buf = kmap(page) + offset;
3745
3746 qc->cursect++;
3747 qc->cursg_ofs++;
3748
3749 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3750 qc->cursg++;
3751 qc->cursg_ofs = 0;
3752 }
3753
3754 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3755
3756 /* do the actual data transfer */
3757 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3758 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3759
3760 kunmap(page);
3761 }
3762
3763 /**
3764 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3765 * @qc: Command on going
3766 * @bytes: number of bytes
3767 *
3768 * Transfer data from/to the ATAPI device.
3769 *
3770 * LOCKING:
3771 * Inherited from caller.
3772 *
3773 */
3774
3775 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3776 {
3777 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3778 struct scatterlist *sg = qc->__sg;
3779 struct ata_port *ap = qc->ap;
3780 struct page *page;
3781 unsigned char *buf;
3782 unsigned int offset, count;
3783
3784 if (qc->curbytes + bytes >= qc->nbytes)
3785 ap->hsm_task_state = HSM_ST_LAST;
3786
3787 next_sg:
3788 if (unlikely(qc->cursg >= qc->n_elem)) {
3789 /*
3790 * The end of qc->sg is reached and the device expects
3791 * more data to transfer. In order not to overrun qc->sg
3792 * and fulfill length specified in the byte count register,
3793 * - for reads, discard the trailing data from the device
3794 * - for writes, pad out with zero data to the device
3795 */
3796 u16 pad_buf[1] = { 0 };
3797 unsigned int words = bytes >> 1;
3798 unsigned int i;
3799
3800 if (words) /* warning if bytes > 1 */
3801 ata_dev_printk(qc->dev, KERN_WARNING,
3802 "%u bytes trailing data\n", bytes);
3803
3804 for (i = 0; i < words; i++)
3805 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3806
3807 ap->hsm_task_state = HSM_ST_LAST;
3808 return;
3809 }
3810
3811 sg = &qc->__sg[qc->cursg];
3812
3813 page = sg->page;
3814 offset = sg->offset + qc->cursg_ofs;
3815
3816 /* get the current page and offset */
3817 page = nth_page(page, (offset >> PAGE_SHIFT));
3818 offset %= PAGE_SIZE;
3819
3820 /* don't overrun current sg */
3821 count = min(sg->length - qc->cursg_ofs, bytes);
3822
3823 /* don't cross page boundaries */
3824 count = min(count, (unsigned int)PAGE_SIZE - offset);
3825
3826 buf = kmap(page) + offset;
3827
3828 bytes -= count;
3829 qc->curbytes += count;
3830 qc->cursg_ofs += count;
3831
3832 if (qc->cursg_ofs == sg->length) {
3833 qc->cursg++;
3834 qc->cursg_ofs = 0;
3835 }
3836
3837 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3838
3839 /* do the actual data transfer */
3840 ata_data_xfer(ap, buf, count, do_write);
3841
3842 kunmap(page);
3843
3844 if (bytes)
3845 goto next_sg;
3846 }
3847
3848 /**
3849 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3850 * @qc: Command on going
3851 *
3852 * Transfer data from/to the ATAPI device.
3853 *
3854 * LOCKING:
3855 * Inherited from caller.
3856 */
3857
3858 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3859 {
3860 struct ata_port *ap = qc->ap;
3861 struct ata_device *dev = qc->dev;
3862 unsigned int ireason, bc_lo, bc_hi, bytes;
3863 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3864
3865 ap->ops->tf_read(ap, &qc->tf);
3866 ireason = qc->tf.nsect;
3867 bc_lo = qc->tf.lbam;
3868 bc_hi = qc->tf.lbah;
3869 bytes = (bc_hi << 8) | bc_lo;
3870
3871 /* shall be cleared to zero, indicating xfer of data */
3872 if (ireason & (1 << 0))
3873 goto err_out;
3874
3875 /* make sure transfer direction matches expected */
3876 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3877 if (do_write != i_write)
3878 goto err_out;
3879
3880 __atapi_pio_bytes(qc, bytes);
3881
3882 return;
3883
3884 err_out:
3885 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3886 qc->err_mask |= AC_ERR_HSM;
3887 ap->hsm_task_state = HSM_ST_ERR;
3888 }
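
/*
 * Editor's note -- the ATAPI register decoding used above, for
 * reference (illustrative, not driver code):
 *
 *	ireason bit 0 (CoD)	0 = data transfer, 1 = command packet
 *	ireason bit 1 (I/O)	0 = data goes to the device (host write),
 *				1 = data comes from the device (host read)
 *	bytes = (lbah << 8) | lbam	size of this transfer chunk
 *
 * A CoD of 1, or an I/O bit that disagrees with the taskfile WRITE flag,
 * is treated as a host state machine (AC_ERR_HSM) violation.
 */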
3889
3890 /**
3891 * ata_pio_block - start PIO on a block
3892 * @qc: qc to transfer block for
3893 *
3894 * LOCKING:
3895 * None. (executing in kernel thread context)
3896 */
3897 static void ata_pio_block(struct ata_queued_cmd *qc)
3898 {
3899 struct ata_port *ap = qc->ap;
3900 u8 status;
3901
3902 /*
3903 * This is purely heuristic. This is a fast path.
3904 * Sometimes when we enter, BSY will be cleared in
3905 * a chk-status or two. If not, the drive is probably seeking
3906 * or something. Snooze for a couple msecs, then
3907 * chk-status again. If still busy, fall back to
3908 * HSM_ST_POLL state.
3909 */
3910 status = ata_busy_wait(ap, ATA_BUSY, 5);
3911 if (status & ATA_BUSY) {
3912 msleep(2);
3913 status = ata_busy_wait(ap, ATA_BUSY, 10);
3914 if (status & ATA_BUSY) {
3915 ap->hsm_task_state = HSM_ST_POLL;
3916 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3917 return;
3918 }
3919 }
3920
3921 /* check error */
3922 if (status & (ATA_ERR | ATA_DF)) {
3923 qc->err_mask |= AC_ERR_DEV;
3924 ap->hsm_task_state = HSM_ST_ERR;
3925 return;
3926 }
3927
3928 /* transfer data if any */
3929 if (is_atapi_taskfile(&qc->tf)) {
3930 /* DRQ=0 means no more data to transfer */
3931 if ((status & ATA_DRQ) == 0) {
3932 ap->hsm_task_state = HSM_ST_LAST;
3933 return;
3934 }
3935
3936 atapi_pio_bytes(qc);
3937 } else {
3938 /* handle BSY=0, DRQ=0 as error */
3939 if ((status & ATA_DRQ) == 0) {
3940 qc->err_mask |= AC_ERR_HSM;
3941 ap->hsm_task_state = HSM_ST_ERR;
3942 return;
3943 }
3944
3945 ata_pio_sector(qc);
3946 }
3947 }
3948
3949 static void ata_pio_error(struct ata_queued_cmd *qc)
3950 {
3951 struct ata_port *ap = qc->ap;
3952
3953 if (qc->tf.command != ATA_CMD_PACKET)
3954 ata_dev_printk(qc->dev, KERN_WARNING, "PIO error\n");
3955
3956 /* make sure qc->err_mask is available to
3957 * know what's wrong and recover
3958 */
3959 WARN_ON(qc->err_mask == 0);
3960
3961 ap->hsm_task_state = HSM_ST_IDLE;
3962
3963 ata_poll_qc_complete(qc);
3964 }
3965
3966 static void ata_pio_task(void *_data)
3967 {
3968 struct ata_queued_cmd *qc = _data;
3969 struct ata_port *ap = qc->ap;
3970 unsigned long timeout;
3971 int qc_completed;
3972
3973 fsm_start:
3974 timeout = 0;
3975 qc_completed = 0;
3976
3977 switch (ap->hsm_task_state) {
3978 case HSM_ST_IDLE:
3979 return;
3980
3981 case HSM_ST:
3982 ata_pio_block(qc);
3983 break;
3984
3985 case HSM_ST_LAST:
3986 qc_completed = ata_pio_complete(qc);
3987 break;
3988
3989 case HSM_ST_POLL:
3990 case HSM_ST_LAST_POLL:
3991 timeout = ata_pio_poll(qc);
3992 break;
3993
3994 case HSM_ST_TMOUT:
3995 case HSM_ST_ERR:
3996 ata_pio_error(qc);
3997 return;
3998 }
3999
4000 if (timeout)
4001 ata_port_queue_task(ap, ata_pio_task, qc, timeout);
4002 else if (!qc_completed)
4003 goto fsm_start;
4004 }
4005
4006 /**
4007 * atapi_packet_task - Write CDB bytes to hardware
4008 * @_data: qc in progress
4009 *
4010 * When device has indicated its readiness to accept
4011 * a CDB, this function is called. Send the CDB.
4012 * If DMA is to be performed, exit immediately.
4013 * Otherwise, we are in polling mode, so poll
4014 * status until the operation succeeds or fails.
4015 *
4016 * LOCKING:
4017 * Kernel thread context (may sleep)
4018 */
4019 static void atapi_packet_task(void *_data)
4020 {
4021 struct ata_queued_cmd *qc = _data;
4022 struct ata_port *ap = qc->ap;
4023 u8 status;
4024
4025 /* sleep-wait for BSY to clear */
4026 DPRINTK("busy wait\n");
4027 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4028 qc->err_mask |= AC_ERR_TIMEOUT;
4029 goto err_out;
4030 }
4031
4032 /* make sure DRQ is set */
4033 status = ata_chk_status(ap);
4034 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4035 qc->err_mask |= AC_ERR_HSM;
4036 goto err_out;
4037 }
4038
4039 /* send SCSI cdb */
4040 DPRINTK("send cdb\n");
4041 WARN_ON(qc->dev->cdb_len < 12);
4042
4043 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4044 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
4045 unsigned long flags;
4046
4047 /* Once we're done issuing command and kicking bmdma,
4048 * irq handler takes over. To not lose irq, we need
4049 * to clear NOINTR flag before sending cdb, but
4050 * interrupt handler shouldn't be invoked before we're
4051 * finished. Hence, the following locking.
4052 */
4053 spin_lock_irqsave(&ap->host_set->lock, flags);
4054 ap->flags &= ~ATA_FLAG_NOINTR;
4055 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4056 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4057 ap->ops->bmdma_start(qc); /* initiate bmdma */
4058 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4059 } else {
4060 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4061
4062 /* PIO commands are handled by polling */
4063 ap->hsm_task_state = HSM_ST;
4064 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4065 }
4066
4067 return;
4068
4069 err_out:
4070 ata_poll_qc_complete(qc);
4071 }
4072
4073 /**
4074 * ata_qc_new - Request an available ATA command, for queueing
4075 * @ap: Port from which to allocate a command structure
4077 *
4078 * LOCKING:
4079 * None.
4080 */
4081
4082 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4083 {
4084 struct ata_queued_cmd *qc = NULL;
4085 unsigned int i;
4086
4087 /* no command while frozen */
4088 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4089 return NULL;
4090
4091 /* the last tag is reserved for internal command. */
4092 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4093 if (!test_and_set_bit(i, &ap->qactive)) {
4094 qc = __ata_qc_from_tag(ap, i);
4095 break;
4096 }
4097
4098 if (qc)
4099 qc->tag = i;
4100
4101 return qc;
4102 }
4103
4104 /**
4105 * ata_qc_new_init - Request an available ATA command, and initialize it
4106 * @dev: Device from whom we request an available command structure
4107 *
4108 * LOCKING:
4109 * None.
4110 */
4111
4112 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4113 {
4114 struct ata_port *ap = dev->ap;
4115 struct ata_queued_cmd *qc;
4116
4117 qc = ata_qc_new(ap);
4118 if (qc) {
4119 qc->scsicmd = NULL;
4120 qc->ap = ap;
4121 qc->dev = dev;
4122
4123 ata_qc_reinit(qc);
4124 }
4125
4126 return qc;
4127 }
4128
4129 /**
4130 * ata_qc_free - free unused ata_queued_cmd
4131 * @qc: Command to complete
4132 *
4133 * Designed to free unused ata_queued_cmd object
4134 * in case something prevents using it.
4135 *
4136 * LOCKING:
4137 * spin_lock_irqsave(host_set lock)
4138 */
4139 void ata_qc_free(struct ata_queued_cmd *qc)
4140 {
4141 struct ata_port *ap = qc->ap;
4142 unsigned int tag;
4143
4144 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4145
4146 qc->flags = 0;
4147 tag = qc->tag;
4148 if (likely(ata_tag_valid(tag))) {
4149 qc->tag = ATA_TAG_POISON;
4150 clear_bit(tag, &ap->qactive);
4151 }
4152 }
4153
4154 void __ata_qc_complete(struct ata_queued_cmd *qc)
4155 {
4156 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4157 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4158
4159 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4160 ata_sg_clean(qc);
4161
4162 /* command should be marked inactive atomically with qc completion */
4163 qc->ap->active_tag = ATA_TAG_POISON;
4164
4165 /* atapi: mark qc as inactive to prevent the interrupt handler
4166 * from completing the command twice later, before the error handler
4167 * is called. (when rc != 0 and atapi request sense is needed)
4168 */
4169 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4170
4171 /* call completion callback */
4172 qc->complete_fn(qc);
4173 }
4174
4175 /**
4176 * ata_qc_complete - Complete an active ATA command
4177 * @qc: Command to complete
4179 *
4180 * Indicate to the mid and upper layers that an ATA
4181 * command has completed, with either an ok or not-ok status.
4182 *
4183 * LOCKING:
4184 * spin_lock_irqsave(host_set lock)
4185 */
4186 void ata_qc_complete(struct ata_queued_cmd *qc)
4187 {
4188 struct ata_port *ap = qc->ap;
4189
4190 /* XXX: New EH and old EH use different mechanisms to
4191 * synchronize EH with regular execution path.
4192 *
4193 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4194 * Normal execution path is responsible for not accessing a
4195 * failed qc. libata core enforces the rule by returning NULL
4196 * from ata_qc_from_tag() for failed qcs.
4197 *
4198 * Old EH depends on ata_qc_complete() nullifying completion
4199 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4200 * not synchronize with interrupt handler. Only PIO task is
4201 * taken care of.
4202 */
4203 if (ap->ops->error_handler) {
4204 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4205
4206 if (unlikely(qc->err_mask))
4207 qc->flags |= ATA_QCFLAG_FAILED;
4208
4209 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4210 if (!ata_tag_internal(qc->tag)) {
4211 /* always fill result TF for failed qc */
4212 ap->ops->tf_read(ap, &qc->result_tf);
4213 ata_qc_schedule_eh(qc);
4214 return;
4215 }
4216 }
4217
4218 /* read result TF if requested */
4219 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4220 ap->ops->tf_read(ap, &qc->result_tf);
4221
4222 __ata_qc_complete(qc);
4223 } else {
4224 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4225 return;
4226
4227 /* read result TF if failed or requested */
4228 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4229 ap->ops->tf_read(ap, &qc->result_tf);
4230
4231 __ata_qc_complete(qc);
4232 }
4233 }
4234
4235 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4236 {
4237 struct ata_port *ap = qc->ap;
4238
4239 switch (qc->tf.protocol) {
4240 case ATA_PROT_DMA:
4241 case ATA_PROT_ATAPI_DMA:
4242 return 1;
4243
4244 case ATA_PROT_ATAPI:
4245 case ATA_PROT_PIO:
4246 if (ap->flags & ATA_FLAG_PIO_DMA)
4247 return 1;
4248
4249 /* fall through */
4250
4251 default:
4252 return 0;
4253 }
4254
4255 /* never reached */
4256 }
4257
4258 /**
4259 * ata_qc_issue - issue taskfile to device
4260 * @qc: command to issue to device
4261 *
4262 * Prepare an ATA command for submission to the device.
4263 * This includes mapping the data into a DMA-able
4264 * area, filling in the S/G table, and finally
4265 * writing the taskfile to hardware, starting the command.
4266 *
4267 * LOCKING:
4268 * spin_lock_irqsave(host_set lock)
4269 */
4270 void ata_qc_issue(struct ata_queued_cmd *qc)
4271 {
4272 struct ata_port *ap = qc->ap;
4273
4274 qc->ap->active_tag = qc->tag;
4275 qc->flags |= ATA_QCFLAG_ACTIVE;
4276
4277 if (ata_should_dma_map(qc)) {
4278 if (qc->flags & ATA_QCFLAG_SG) {
4279 if (ata_sg_setup(qc))
4280 goto sg_err;
4281 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4282 if (ata_sg_setup_one(qc))
4283 goto sg_err;
4284 }
4285 } else {
4286 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4287 }
4288
4289 ap->ops->qc_prep(qc);
4290
4291 qc->err_mask |= ap->ops->qc_issue(qc);
4292 if (unlikely(qc->err_mask))
4293 goto err;
4294 return;
4295
4296 sg_err:
4297 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4298 qc->err_mask |= AC_ERR_SYSTEM;
4299 err:
4300 ata_qc_complete(qc);
4301 }
4302
4303 /**
4304 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4305 * @qc: command to issue to device
4306 *
4307 * Using various libata functions and hooks, this function
4308 * starts an ATA command. ATA commands are grouped into
4309 * classes called "protocols", and issuing each type of protocol
4310 * is slightly different.
4311 *
4312 * May be used as the qc_issue() entry in ata_port_operations.
4313 *
4314 * LOCKING:
4315 * spin_lock_irqsave(host_set lock)
4316 *
4317 * RETURNS:
4318 * Zero on success, AC_ERR_* mask on failure
4319 */
4320
4321 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4322 {
4323 struct ata_port *ap = qc->ap;
4324
4325 ata_dev_select(ap, qc->dev->devno, 1, 0);
4326
4327 switch (qc->tf.protocol) {
4328 case ATA_PROT_NODATA:
4329 ata_tf_to_host(ap, &qc->tf);
4330 break;
4331
4332 case ATA_PROT_DMA:
4333 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4334 ap->ops->bmdma_setup(qc); /* set up bmdma */
4335 ap->ops->bmdma_start(qc); /* initiate bmdma */
4336 break;
4337
4338 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4339 ata_qc_set_polling(qc);
4340 ata_tf_to_host(ap, &qc->tf);
4341 ap->hsm_task_state = HSM_ST;
4342 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4343 break;
4344
4345 case ATA_PROT_ATAPI:
4346 ata_qc_set_polling(qc);
4347 ata_tf_to_host(ap, &qc->tf);
4348 ata_port_queue_task(ap, atapi_packet_task, qc, 0);
4349 break;
4350
4351 case ATA_PROT_ATAPI_NODATA:
4352 ap->flags |= ATA_FLAG_NOINTR;
4353 ata_tf_to_host(ap, &qc->tf);
4354 ata_port_queue_task(ap, atapi_packet_task, qc, 0);
4355 break;
4356
4357 case ATA_PROT_ATAPI_DMA:
4358 ap->flags |= ATA_FLAG_NOINTR;
4359 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4360 ap->ops->bmdma_setup(qc); /* set up bmdma */
4361 ata_port_queue_task(ap, atapi_packet_task, qc, 0);
4362 break;
4363
4364 default:
4365 WARN_ON(1);
4366 return AC_ERR_SYSTEM;
4367 }
4368
4369 return 0;
4370 }
4371
4372 /**
4373 * ata_host_intr - Handle host interrupt for given (port, task)
4374 * @ap: Port on which interrupt arrived (possibly...)
4375 * @qc: Taskfile currently active in engine
4376 *
4377 * Handle host interrupt for given queued command. Currently,
4378 * only DMA interrupts are handled. All other commands are
4379 * handled via polling with interrupts disabled (nIEN bit).
4380 *
4381 * LOCKING:
4382 * spin_lock_irqsave(host_set lock)
4383 *
4384 * RETURNS:
4385 * One if interrupt was handled, zero if not (shared irq).
4386 */
4387
4388 inline unsigned int ata_host_intr (struct ata_port *ap,
4389 struct ata_queued_cmd *qc)
4390 {
4391 u8 status, host_stat;
4392
4393 switch (qc->tf.protocol) {
4394
4395 case ATA_PROT_DMA:
4396 case ATA_PROT_ATAPI_DMA:
4397 case ATA_PROT_ATAPI:
4398 /* check status of DMA engine */
4399 host_stat = ap->ops->bmdma_status(ap);
4400 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4401
4402 /* if it's not our irq... */
4403 if (!(host_stat & ATA_DMA_INTR))
4404 goto idle_irq;
4405
4406 /* before we do anything else, clear DMA-Start bit */
4407 ap->ops->bmdma_stop(qc);
4408
4409 /* fall through */
4410
4411 case ATA_PROT_ATAPI_NODATA:
4412 case ATA_PROT_NODATA:
4413 /* check altstatus */
4414 status = ata_altstatus(ap);
4415 if (status & ATA_BUSY)
4416 goto idle_irq;
4417
4418 /* check main status, clearing INTRQ */
4419 status = ata_chk_status(ap);
4420 if (unlikely(status & ATA_BUSY))
4421 goto idle_irq;
4422 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4423 ap->id, qc->tf.protocol, status);
4424
4425 /* ack bmdma irq events */
4426 ap->ops->irq_clear(ap);
4427
4428 /* complete taskfile transaction */
4429 qc->err_mask |= ac_err_mask(status);
4430 ata_qc_complete(qc);
4431 break;
4432
4433 default:
4434 goto idle_irq;
4435 }
4436
4437 return 1; /* irq handled */
4438
4439 idle_irq:
4440 ap->stats.idle_irq++;
4441
4442 #ifdef ATA_IRQ_TRAP
4443 if ((ap->stats.idle_irq % 1000) == 0) {
4444 ata_irq_ack(ap, 0); /* debug trap */
4445 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4446 return 1;
4447 }
4448 #endif
4449 return 0; /* irq not handled */
4450 }
4451
4452 /**
4453 * ata_interrupt - Default ATA host interrupt handler
4454 * @irq: irq line (unused)
4455 * @dev_instance: pointer to our ata_host_set information structure
4456 * @regs: unused
4457 *
4458 * Default interrupt handler for PCI IDE devices. Calls
4459 * ata_host_intr() for each port that is not disabled.
4460 *
4461 * LOCKING:
4462 * Obtains host_set lock during operation.
4463 *
4464 * RETURNS:
4465 * IRQ_NONE or IRQ_HANDLED.
4466 */
4467
4468 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4469 {
4470 struct ata_host_set *host_set = dev_instance;
4471 unsigned int i;
4472 unsigned int handled = 0;
4473 unsigned long flags;
4474
4475 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4476 spin_lock_irqsave(&host_set->lock, flags);
4477
4478 for (i = 0; i < host_set->n_ports; i++) {
4479 struct ata_port *ap;
4480
4481 ap = host_set->ports[i];
4482 if (ap &&
4483 !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
4484 struct ata_queued_cmd *qc;
4485
4486 qc = ata_qc_from_tag(ap, ap->active_tag);
4487 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4488 (qc->flags & ATA_QCFLAG_ACTIVE))
4489 handled |= ata_host_intr(ap, qc);
4490 }
4491 }
4492
4493 spin_unlock_irqrestore(&host_set->lock, flags);
4494
4495 return IRQ_RETVAL(handled);
4496 }
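
/* Illustrative sketch (not part of libata): a hypothetical low-level driver
 * wires ata_interrupt() in as its ->irq_handler, so the request_irq() call
 * in ata_device_add() installs it for the whole host set.  The structure
 * name is an assumption; the taskfile, bmdma and reset hooks a real driver
 * also needs are omitted for brevity.
 */
#if 0	/* usage sketch only */
static const struct ata_port_operations foo_port_ops = {
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
	.host_stop	= ata_host_stop,
};
#endif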
4497
4498 /**
4499 * sata_scr_valid - test whether SCRs are accessible
4500 * @ap: ATA port to test SCR accessibility for
4501 *
4502 * Test whether SCRs are accessible for @ap.
4503 *
4504 * LOCKING:
4505 * None.
4506 *
4507 * RETURNS:
4508 * 1 if SCRs are accessible, 0 otherwise.
4509 */
4510 int sata_scr_valid(struct ata_port *ap)
4511 {
4512 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4513 }
4514
4515 /**
4516 * sata_scr_read - read SCR register of the specified port
4517 * @ap: ATA port to read SCR for
4518 * @reg: SCR to read
4519 * @val: Place to store read value
4520 *
4521 * Read SCR register @reg of @ap into *@val. This function is
4522 * guaranteed to succeed if the cable type of the port is SATA
4523 * and the port implements ->scr_read.
4524 *
4525 * LOCKING:
4526 * None.
4527 *
4528 * RETURNS:
4529 * 0 on success, negative errno on failure.
4530 */
4531 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4532 {
4533 if (sata_scr_valid(ap)) {
4534 *val = ap->ops->scr_read(ap, reg);
4535 return 0;
4536 }
4537 return -EOPNOTSUPP;
4538 }
4539
4540 /**
4541 * sata_scr_write - write SCR register of the specified port
4542 * @ap: ATA port to write SCR for
4543 * @reg: SCR to write
4544 * @val: value to write
4545 *
4546 * Write @val to SCR register @reg of @ap. This function is
4547 * guaranteed to succeed if the cable type of the port is SATA
4548 * and the port implements ->scr_read.
4549 *
4550 * LOCKING:
4551 * None.
4552 *
4553 * RETURNS:
4554 * 0 on success, negative errno on failure.
4555 */
4556 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4557 {
4558 if (sata_scr_valid(ap)) {
4559 ap->ops->scr_write(ap, reg, val);
4560 return 0;
4561 }
4562 return -EOPNOTSUPP;
4563 }
4564
4565 /**
4566 * sata_scr_write_flush - write SCR register of the specified port and flush
4567 * @ap: ATA port to write SCR for
4568 * @reg: SCR to write
4569 * @val: value to write
4570 *
4571 * This function is identical to sata_scr_write() except that it
4572 * performs a flush after writing to the register.
4573 *
4574 * LOCKING:
4575 * None.
4576 *
4577 * RETURNS:
4578 * 0 on success, negative errno on failure.
4579 */
4580 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4581 {
4582 if (sata_scr_valid(ap)) {
4583 ap->ops->scr_write(ap, reg, val);
4584 ap->ops->scr_read(ap, reg);
4585 return 0;
4586 }
4587 return -EOPNOTSUPP;
4588 }
4589
4590 /**
4591 * ata_port_online - test whether the given port is online
4592 * @ap: ATA port to test
4593 *
4594 * Test whether @ap is online. Note that this function returns 0
4595 * if online status of @ap cannot be obtained, so
4596 * ata_port_online(ap) != !ata_port_offline(ap).
4597 *
4598 * LOCKING:
4599 * None.
4600 *
4601 * RETURNS:
4602 * 1 if the port online status is available and online.
4603 */
4604 int ata_port_online(struct ata_port *ap)
4605 {
4606 u32 sstatus;
4607
4608 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4609 return 1;
4610 return 0;
4611 }
4612
4613 /**
4614 * ata_port_offline - test whether the given port is offline
4615 * @ap: ATA port to test
4616 *
4617 * Test whether @ap is offline. Note that this function returns
4618 * 0 if offline status of @ap cannot be obtained, so
4619 * ata_port_online(ap) != !ata_port_offline(ap).
4620 *
4621 * LOCKING:
4622 * None.
4623 *
4624 * RETURNS:
4625 * 1 if the port offline status is available and offline.
4626 */
4627 int ata_port_offline(struct ata_port *ap)
4628 {
4629 u32 sstatus;
4630
4631 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4632 return 1;
4633 return 0;
4634 }
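
/* Illustrative sketch (not part of libata): reading and clearing SError
 * with the SCR accessors above.  Both calls fail with -EOPNOTSUPP on ports
 * without SCR access, so the return values must be checked.
 * foo_clear_serror() is a hypothetical helper name.
 */
#if 0	/* usage sketch only */
static int foo_clear_serror(struct ata_port *ap)
{
	u32 serror;
	int rc;

	rc = sata_scr_read(ap, SCR_ERROR, &serror);
	if (rc)
		return rc;		/* no SCR access on this port */

	ata_port_printk(ap, KERN_INFO, "SError 0x%08x\n", serror);

	/* SError bits are write-1-to-clear */
	return sata_scr_write(ap, SCR_ERROR, serror);
}
#endif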
4635
4636 /*
4637 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4638 * without filling any other registers
4639 */
4640 static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4641 {
4642 struct ata_taskfile tf;
4643 int err;
4644
4645 ata_tf_init(dev, &tf);
4646
4647 tf.command = cmd;
4648 tf.flags |= ATA_TFLAG_DEVICE;
4649 tf.protocol = ATA_PROT_NODATA;
4650
4651 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4652 if (err)
4653 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4654 __FUNCTION__, err);
4655
4656 return err;
4657 }
4658
4659 static int ata_flush_cache(struct ata_device *dev)
4660 {
4661 u8 cmd;
4662
4663 if (!ata_try_flush_cache(dev))
4664 return 0;
4665
4666 if (ata_id_has_flush_ext(dev->id))
4667 cmd = ATA_CMD_FLUSH_EXT;
4668 else
4669 cmd = ATA_CMD_FLUSH;
4670
4671 return ata_do_simple_cmd(dev, cmd);
4672 }
4673
4674 static int ata_standby_drive(struct ata_device *dev)
4675 {
4676 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4677 }
4678
4679 static int ata_start_drive(struct ata_device *dev)
4680 {
4681 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4682 }
4683
4684 /**
4685 * ata_device_resume - wake up a previously suspended device
4686 * @dev: the device to resume
4687 *
4688 * Kick the drive back into action by sending it an idle immediate
4689 * command and making sure the transfer mode settings of the drive
4690 * and the host agree.
4691 *
4692 */
4693 int ata_device_resume(struct ata_device *dev)
4694 {
4695 struct ata_port *ap = dev->ap;
4696
4697 if (ap->flags & ATA_FLAG_SUSPENDED) {
4698 struct ata_device *failed_dev;
4699 ap->flags &= ~ATA_FLAG_SUSPENDED;
4700 while (ata_set_mode(ap, &failed_dev))
4701 ata_dev_disable(failed_dev);
4702 }
4703 if (!ata_dev_enabled(dev))
4704 return 0;
4705 if (dev->class == ATA_DEV_ATA)
4706 ata_start_drive(dev);
4707
4708 return 0;
4709 }
4710
4711 /**
4712 * ata_device_suspend - prepare a device for suspend
4713 * @dev: the device to suspend
4714 *
4715 * Flush the cache on the drive, if appropriate, then issue a
4716 * standbynow command.
4717 */
4718 int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4719 {
4720 struct ata_port *ap = dev->ap;
4721
4722 if (!ata_dev_enabled(dev))
4723 return 0;
4724 if (dev->class == ATA_DEV_ATA)
4725 ata_flush_cache(dev);
4726
4727 if (state.event != PM_EVENT_FREEZE)
4728 ata_standby_drive(dev);
4729 ap->flags |= ATA_FLAG_SUSPENDED;
4730 return 0;
4731 }
4732
4733 /**
4734 * ata_port_start - Set port up for dma.
4735 * @ap: Port to initialize
4736 *
4737 * Called just after data structures for each port are
4738 * initialized. Allocates space for PRD table.
4739 *
4740 * May be used as the port_start() entry in ata_port_operations.
4741 *
4742 * LOCKING:
4743 * Inherited from caller.
4744 */
4745
4746 int ata_port_start (struct ata_port *ap)
4747 {
4748 struct device *dev = ap->dev;
4749 int rc;
4750
4751 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4752 if (!ap->prd)
4753 return -ENOMEM;
4754
4755 rc = ata_pad_alloc(ap, dev);
4756 if (rc) {
4757 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4758 return rc;
4759 }
4760
4761 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4762
4763 return 0;
4764 }
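
/* Illustrative sketch (not part of libata): a hypothetical driver's
 * ->port_start() that layers per-port private data on top of
 * ata_port_start().  struct foo_port_priv and foo_port_start() are
 * assumed names used only for this example.
 */
#if 0	/* usage sketch only */
struct foo_port_priv {
	u32 saved_config;
};

static int foo_port_start(struct ata_port *ap)
{
	struct foo_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* PRD table + padding buffer */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);
		return -ENOMEM;
	}

	ap->private_data = pp;
	return 0;
}
#endif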
4765
4766
4767 /**
4768 * ata_port_stop - Undo ata_port_start()
4769 * @ap: Port to shut down
4770 *
4771 * Frees the PRD table.
4772 *
4773 * May be used as the port_stop() entry in ata_port_operations.
4774 *
4775 * LOCKING:
4776 * Inherited from caller.
4777 */
4778
4779 void ata_port_stop (struct ata_port *ap)
4780 {
4781 struct device *dev = ap->dev;
4782
4783 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4784 ata_pad_free(ap, dev);
4785 }
4786
4787 void ata_host_stop (struct ata_host_set *host_set)
4788 {
4789 if (host_set->mmio_base)
4790 iounmap(host_set->mmio_base);
4791 }
4792
4793
4794 /**
4795 * ata_host_remove - Unregister SCSI host structure with upper layers
4796 * @ap: Port to unregister
4797 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4798 *
4799 * LOCKING:
4800 * Inherited from caller.
4801 */
4802
4803 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4804 {
4805 struct Scsi_Host *sh = ap->host;
4806
4807 DPRINTK("ENTER\n");
4808
4809 if (do_unregister)
4810 scsi_remove_host(sh);
4811
4812 ap->ops->port_stop(ap);
4813 }
4814
4815 /**
4816 * ata_host_init - Initialize an ata_port structure
4817 * @ap: Structure to initialize
4818 * @host: associated SCSI mid-layer structure
4819 * @host_set: Collection of hosts to which @ap belongs
4820 * @ent: Probe information provided by low-level driver
4821 * @port_no: Port number associated with this ata_port
4822 *
4823 * Initialize a new ata_port structure, and its associated
4824 * scsi_host.
4825 *
4826 * LOCKING:
4827 * Inherited from caller.
4828 */
4829
4830 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4831 struct ata_host_set *host_set,
4832 const struct ata_probe_ent *ent, unsigned int port_no)
4833 {
4834 unsigned int i;
4835
4836 host->max_id = 16;
4837 host->max_lun = 1;
4838 host->max_channel = 1;
4839 host->unique_id = ata_unique_id++;
4840 host->max_cmd_len = 12;
4841
4842 ap->flags = ATA_FLAG_DISABLED;
4843 ap->id = host->unique_id;
4844 ap->host = host;
4845 ap->ctl = ATA_DEVCTL_OBS;
4846 ap->host_set = host_set;
4847 ap->dev = ent->dev;
4848 ap->port_no = port_no;
4849 ap->hard_port_no =
4850 ent->legacy_mode ? ent->hard_port_no : port_no;
4851 ap->pio_mask = ent->pio_mask;
4852 ap->mwdma_mask = ent->mwdma_mask;
4853 ap->udma_mask = ent->udma_mask;
4854 ap->flags |= ent->host_flags;
4855 ap->ops = ent->port_ops;
4856 ap->sata_spd_limit = UINT_MAX;
4857 ap->active_tag = ATA_TAG_POISON;
4858 ap->last_ctl = 0xFF;
4859
4860 INIT_WORK(&ap->port_task, NULL, NULL);
4861 INIT_LIST_HEAD(&ap->eh_done_q);
4862
4863 /* set cable type */
4864 ap->cbl = ATA_CBL_NONE;
4865 if (ap->flags & ATA_FLAG_SATA)
4866 ap->cbl = ATA_CBL_SATA;
4867
4868 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4869 struct ata_device *dev = &ap->device[i];
4870 dev->ap = ap;
4871 dev->devno = i;
4872 dev->pio_mask = UINT_MAX;
4873 dev->mwdma_mask = UINT_MAX;
4874 dev->udma_mask = UINT_MAX;
4875 }
4876
4877 #ifdef ATA_IRQ_TRAP
4878 ap->stats.unhandled_irq = 1;
4879 ap->stats.idle_irq = 1;
4880 #endif
4881
4882 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4883 }
4884
4885 /**
4886 * ata_host_add - Attach low-level ATA driver to system
4887 * @ent: Information provided by low-level driver
4888 * @host_set: Collections of ports to which we add
4889 * @port_no: Port number associated with this host
4890 *
4891 * Attach low-level ATA driver to system.
4892 *
4893 * LOCKING:
4894 * PCI/etc. bus probe sem.
4895 *
4896 * RETURNS:
4897 * New ata_port on success, NULL on error.
4898 */
4899
4900 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4901 struct ata_host_set *host_set,
4902 unsigned int port_no)
4903 {
4904 struct Scsi_Host *host;
4905 struct ata_port *ap;
4906 int rc;
4907
4908 DPRINTK("ENTER\n");
4909
4910 if (!ent->port_ops->probe_reset &&
4911 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4912 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4913 port_no);
4914 return NULL;
4915 }
4916
4917 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4918 if (!host)
4919 return NULL;
4920
4921 host->transportt = &ata_scsi_transport_template;
4922
4923 ap = ata_shost_to_port(host);
4924
4925 ata_host_init(ap, host, host_set, ent, port_no);
4926
4927 rc = ap->ops->port_start(ap);
4928 if (rc)
4929 goto err_out;
4930
4931 return ap;
4932
4933 err_out:
4934 scsi_host_put(host);
4935 return NULL;
4936 }
4937
4938 /**
4939 * ata_device_add - Register hardware device with ATA and SCSI layers
4940 * @ent: Probe information describing hardware device to be registered
4941 *
4942 * This function processes the information provided in the probe
4943 * information struct @ent, allocates the necessary ATA and SCSI
4944 * host information structures, initializes them, and registers
4945 * everything with requisite kernel subsystems.
4946 *
4947 * This function requests irqs, probes the ATA bus, and probes
4948 * the SCSI bus.
4949 *
4950 * LOCKING:
4951 * PCI/etc. bus probe sem.
4952 *
4953 * RETURNS:
4954 * Number of ports registered. Zero on error (no ports registered).
4955 */
4956
4957 int ata_device_add(const struct ata_probe_ent *ent)
4958 {
4959 unsigned int count = 0, i;
4960 struct device *dev = ent->dev;
4961 struct ata_host_set *host_set;
4962
4963 DPRINTK("ENTER\n");
4964 /* alloc a container for our list of ATA ports (buses) */
4965 host_set = kzalloc(sizeof(struct ata_host_set) +
4966 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4967 if (!host_set)
4968 return 0;
4969 spin_lock_init(&host_set->lock);
4970
4971 host_set->dev = dev;
4972 host_set->n_ports = ent->n_ports;
4973 host_set->irq = ent->irq;
4974 host_set->mmio_base = ent->mmio_base;
4975 host_set->private_data = ent->private_data;
4976 host_set->ops = ent->port_ops;
4977 host_set->flags = ent->host_set_flags;
4978
4979 /* register each port bound to this device */
4980 for (i = 0; i < ent->n_ports; i++) {
4981 struct ata_port *ap;
4982 unsigned long xfer_mode_mask;
4983
4984 ap = ata_host_add(ent, host_set, i);
4985 if (!ap)
4986 goto err_out;
4987
4988 host_set->ports[i] = ap;
4989 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4990 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4991 (ap->pio_mask << ATA_SHIFT_PIO);
4992
4993 /* print per-port info to dmesg */
4994 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4995 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4996 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4997 ata_mode_string(xfer_mode_mask),
4998 ap->ioaddr.cmd_addr,
4999 ap->ioaddr.ctl_addr,
5000 ap->ioaddr.bmdma_addr,
5001 ent->irq);
5002
5003 ata_chk_status(ap);
5004 host_set->ops->irq_clear(ap);
5005 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5006 count++;
5007 }
5008
5009 if (!count)
5010 goto err_free_ret;
5011
5012 /* obtain irq, that is shared between channels */
5013 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5014 DRV_NAME, host_set))
5015 goto err_out;
5016
5017 /* perform each probe synchronously */
5018 DPRINTK("probe begin\n");
5019 for (i = 0; i < count; i++) {
5020 struct ata_port *ap;
5021 int rc;
5022
5023 ap = host_set->ports[i];
5024
5025 DPRINTK("ata%u: bus probe begin\n", ap->id);
5026 rc = ata_bus_probe(ap);
5027 DPRINTK("ata%u: bus probe end\n", ap->id);
5028
5029 if (rc) {
5030 /* FIXME: do something useful here?
5031 * Current libata behavior will
5032 * tear down everything when
5033 * the module is removed
5034 * or the h/w is unplugged.
5035 */
5036 }
5037
5038 rc = scsi_add_host(ap->host, dev);
5039 if (rc) {
5040 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5041 /* FIXME: do something useful here */
5042 /* FIXME: handle unconditional calls to
5043 * scsi_scan_host and ata_host_remove, below,
5044 * at the very least
5045 */
5046 }
5047 }
5048
5049 /* probes are done, now scan each port's disk(s) */
5050 DPRINTK("host probe begin\n");
5051 for (i = 0; i < count; i++) {
5052 struct ata_port *ap = host_set->ports[i];
5053
5054 ata_scsi_scan_host(ap);
5055 }
5056
5057 dev_set_drvdata(dev, host_set);
5058
5059 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5060 return ent->n_ports; /* success */
5061
5062 err_out:
5063 for (i = 0; i < count; i++) {
5064 ata_host_remove(host_set->ports[i], 1);
5065 scsi_host_put(host_set->ports[i]->host);
5066 }
5067 err_free_ret:
5068 kfree(host_set);
5069 VPRINTK("EXIT, returning 0\n");
5070 return 0;
5071 }
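
/* Illustrative sketch (not part of libata): the shape of a probe entry a
 * low-level driver might fill before calling ata_device_add().  Only the
 * fields consumed above are shown; foo_sht, foo_port_ops, foo_init_one()
 * and the transfer-mode masks are assumptions for the example, and the
 * ctl/bmdma addresses are left out for brevity.
 */
#if 0	/* usage sketch only */
static int foo_init_one(struct pci_dev *pdev)
{
	struct ata_probe_ent ent;

	memset(&ent, 0, sizeof(ent));
	ent.dev = pci_dev_to_dev(pdev);
	ent.sht = &foo_sht;			/* assumed scsi_host_template */
	ent.port_ops = &foo_port_ops;		/* assumed ata_port_operations */
	ent.n_ports = 1;
	ent.irq = pdev->irq;
	ent.irq_flags = SA_SHIRQ;
	ent.host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET;
	ent.pio_mask = 0x1f;			/* PIO0-4 */
	ent.mwdma_mask = 0x07;			/* MWDMA0-2 */
	ent.udma_mask = 0x7f;			/* UDMA0-6 */
	ent.port[0].cmd_addr = pci_resource_start(pdev, 0);
	ata_std_ports(&ent.port[0]);

	return ata_device_add(&ent) ? 0 : -ENODEV;
}
#endif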
5072
5073 /**
5074 * ata_host_set_remove - PCI layer callback for device removal
5075 * @host_set: ATA host set that was removed
5076 *
5077 * Unregister all objects associated with this host set. Free those
5078 * objects.
5079 *
5080 * LOCKING:
5081 * Inherited from calling layer (may sleep).
5082 */
5083
5084 void ata_host_set_remove(struct ata_host_set *host_set)
5085 {
5086 struct ata_port *ap;
5087 unsigned int i;
5088
5089 for (i = 0; i < host_set->n_ports; i++) {
5090 ap = host_set->ports[i];
5091 scsi_remove_host(ap->host);
5092 }
5093
5094 free_irq(host_set->irq, host_set);
5095
5096 for (i = 0; i < host_set->n_ports; i++) {
5097 ap = host_set->ports[i];
5098
5099 ata_scsi_release(ap->host);
5100
5101 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5102 struct ata_ioports *ioaddr = &ap->ioaddr;
5103
5104 if (ioaddr->cmd_addr == 0x1f0)
5105 release_region(0x1f0, 8);
5106 else if (ioaddr->cmd_addr == 0x170)
5107 release_region(0x170, 8);
5108 }
5109
5110 scsi_host_put(ap->host);
5111 }
5112
5113 if (host_set->ops->host_stop)
5114 host_set->ops->host_stop(host_set);
5115
5116 kfree(host_set);
5117 }
5118
5119 /**
5120 * ata_scsi_release - SCSI layer callback hook for host unload
5121 * @host: libata host to be unloaded
5122 *
5123 * Performs all duties necessary to shut down a libata port...
5124 * Kill port kthread, disable port, and release resources.
5125 *
5126 * LOCKING:
5127 * Inherited from SCSI layer.
5128 *
5129 * RETURNS:
5130 * One.
5131 */
5132
5133 int ata_scsi_release(struct Scsi_Host *host)
5134 {
5135 struct ata_port *ap = ata_shost_to_port(host);
5136
5137 DPRINTK("ENTER\n");
5138
5139 ap->ops->port_disable(ap);
5140 ata_host_remove(ap, 0);
5141
5142 DPRINTK("EXIT\n");
5143 return 1;
5144 }
5145
5146 /**
5147 * ata_std_ports - initialize ioaddr with standard port offsets.
5148 * @ioaddr: IO address structure to be initialized
5149 *
5150 * Utility function which initializes data_addr, error_addr,
5151 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5152 * device_addr, status_addr, and command_addr to standard offsets
5153 * relative to cmd_addr.
5154 *
5155 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5156 */
5157
5158 void ata_std_ports(struct ata_ioports *ioaddr)
5159 {
5160 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5161 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5162 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5163 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5164 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5165 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5166 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5167 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5168 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5169 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5170 }
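
/* Illustrative sketch (not part of libata): filling a legacy primary
 * channel's ata_ioports and letting ata_std_ports() derive the taskfile
 * register addresses from cmd_addr.  The base addresses are the usual
 * legacy ISA values and serve only the example.
 */
#if 0	/* usage sketch only */
static void foo_fill_legacy_ioaddr(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = 0x1f0;	/* command block base */
	ioaddr->altstatus_addr = 0x3f6;
	ioaddr->ctl_addr = 0x3f6;
	ioaddr->bmdma_addr = 0;		/* PIO only in this sketch */
	ata_std_ports(ioaddr);		/* data/error/.../command offsets */
}
#endif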
5171
5172
5173 #ifdef CONFIG_PCI
5174
5175 void ata_pci_host_stop (struct ata_host_set *host_set)
5176 {
5177 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5178
5179 pci_iounmap(pdev, host_set->mmio_base);
5180 }
5181
5182 /**
5183 * ata_pci_remove_one - PCI layer callback for device removal
5184 * @pdev: PCI device that was removed
5185 *
5186 * PCI layer indicates to libata via this hook that
5187 * a hot-unplug or module unload event has occurred.
5188 * Handle this by unregistering all objects associated
5189 * with this PCI device. Free those objects. Then finally
5190 * release PCI resources and disable device.
5191 *
5192 * LOCKING:
5193 * Inherited from PCI layer (may sleep).
5194 */
5195
5196 void ata_pci_remove_one (struct pci_dev *pdev)
5197 {
5198 struct device *dev = pci_dev_to_dev(pdev);
5199 struct ata_host_set *host_set = dev_get_drvdata(dev);
5200
5201 ata_host_set_remove(host_set);
5202 pci_release_regions(pdev);
5203 pci_disable_device(pdev);
5204 dev_set_drvdata(dev, NULL);
5205 }
5206
5207 /* move to PCI subsystem */
5208 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5209 {
5210 unsigned long tmp = 0;
5211
5212 switch (bits->width) {
5213 case 1: {
5214 u8 tmp8 = 0;
5215 pci_read_config_byte(pdev, bits->reg, &tmp8);
5216 tmp = tmp8;
5217 break;
5218 }
5219 case 2: {
5220 u16 tmp16 = 0;
5221 pci_read_config_word(pdev, bits->reg, &tmp16);
5222 tmp = tmp16;
5223 break;
5224 }
5225 case 4: {
5226 u32 tmp32 = 0;
5227 pci_read_config_dword(pdev, bits->reg, &tmp32);
5228 tmp = tmp32;
5229 break;
5230 }
5231
5232 default:
5233 return -EINVAL;
5234 }
5235
5236 tmp &= bits->mask;
5237
5238 return (tmp == bits->val) ? 1 : 0;
5239 }
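
/* Illustrative sketch (not part of libata): how a PCI IDE driver might use
 * pci_test_config_bits() to check whether a channel's I/O decode is enabled
 * before registering it.  The register offsets, masks and values below are
 * made up for the example; foo_enable_bits and foo_channel_enabled() are
 * assumed names.
 */
#if 0	/* usage sketch only */
static const struct pci_bits foo_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0: reg 0x41, bit 7 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1: reg 0x43, bit 7 */
};

static int foo_channel_enabled(struct pci_dev *pdev, unsigned int port)
{
	/* returns 1 if enabled, 0 if disabled, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &foo_enable_bits[port]);
}
#endif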
5240
5241 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5242 {
5243 pci_save_state(pdev);
5244 pci_disable_device(pdev);
5245 pci_set_power_state(pdev, PCI_D3hot);
5246 return 0;
5247 }
5248
5249 int ata_pci_device_resume(struct pci_dev *pdev)
5250 {
5251 pci_set_power_state(pdev, PCI_D0);
5252 pci_restore_state(pdev);
5253 pci_enable_device(pdev);
5254 pci_set_master(pdev);
5255 return 0;
5256 }
5257 #endif /* CONFIG_PCI */
5258
5259
5260 static int __init ata_init(void)
5261 {
5262 ata_wq = create_workqueue("ata");
5263 if (!ata_wq)
5264 return -ENOMEM;
5265
5266 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5267 return 0;
5268 }
5269
5270 static void __exit ata_exit(void)
5271 {
5272 destroy_workqueue(ata_wq);
5273 }
5274
5275 module_init(ata_init);
5276 module_exit(ata_exit);
5277
5278 static unsigned long ratelimit_time;
5279 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5280
5281 int ata_ratelimit(void)
5282 {
5283 int rc;
5284 unsigned long flags;
5285
5286 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5287
5288 if (time_after(jiffies, ratelimit_time)) {
5289 rc = 1;
5290 ratelimit_time = jiffies + (HZ/5);
5291 } else
5292 rc = 0;
5293
5294 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5295
5296 return rc;
5297 }
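
/* Illustrative sketch (not part of libata): ata_ratelimit() gates noisy
 * messages from hot paths such as interrupt handlers, letting at most one
 * message through per HZ/5 interval.  The helper name and message text are
 * examples only.
 */
#if 0	/* usage sketch only */
static void foo_note_spurious_irq(struct ata_port *ap, u8 status)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt (status 0x%x)\n", status);
}
#endif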
5298
5299 /**
5300 * ata_wait_register - wait until register value changes
5301 * @reg: IO-mapped register
5302 * @mask: Mask to apply to read register value
5303 * @val: Wait condition
5304 * @interval_msec: polling interval in milliseconds
5305 * @timeout_msec: timeout in milliseconds
5306 *
5307 * Waiting for some bits of a register to change is a common
5308 * operation for ATA controllers. This function reads the 32-bit LE
5309 * IO-mapped register @reg and tests for the following condition.
5310 *
5311 * (*@reg & mask) != val
5312 *
5313 * If the condition is met, it returns; otherwise, the process is
5314 * repeated after @interval_msec until timeout.
5315 *
5316 * LOCKING:
5317 * Kernel thread context (may sleep)
5318 *
5319 * RETURNS:
5320 * The final register value.
5321 */
5322 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5323 unsigned long interval_msec,
5324 unsigned long timeout_msec)
5325 {
5326 unsigned long timeout;
5327 u32 tmp;
5328
5329 tmp = ioread32(reg);
5330
5331 /* Calculate timeout _after_ the first read to make sure
5332 * preceding writes reach the controller before starting to
5333 * eat away the timeout.
5334 */
5335 timeout = jiffies + (timeout_msec * HZ) / 1000;
5336
5337 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5338 msleep(interval_msec);
5339 tmp = ioread32(reg);
5340 }
5341
5342 return tmp;
5343 }
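
/* Illustrative sketch (not part of libata): waiting for a busy bit to clear
 * in an MMIO status register with ata_wait_register().  The wait runs while
 * (reg & mask) == val, so passing the same bit for both arguments waits for
 * it to drop.  foo_wait_idle() and the register pointer are assumptions.
 */
#if 0	/* usage sketch only */
static int foo_wait_idle(void __iomem *status_reg)
{
	u32 tmp;

	/* poll every 10ms, give up after 500ms */
	tmp = ata_wait_register(status_reg, ATA_BUSY, ATA_BUSY, 10, 500);
	if (tmp & ATA_BUSY)
		return -EBUSY;		/* timed out, still busy */
	return 0;
}
#endif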
5344
5345 /*
5346 * libata is essentially a library of internal helper functions for
5347 * low-level ATA host controller drivers. As such, the API/ABI is
5348 * likely to change as new drivers are added and updated.
5349 * Do not depend on ABI/API stability.
5350 */
5351
5352 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5353 EXPORT_SYMBOL_GPL(ata_std_ports);
5354 EXPORT_SYMBOL_GPL(ata_device_add);
5355 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5356 EXPORT_SYMBOL_GPL(ata_sg_init);
5357 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5358 EXPORT_SYMBOL_GPL(ata_qc_complete);
5359 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5360 EXPORT_SYMBOL_GPL(ata_tf_load);
5361 EXPORT_SYMBOL_GPL(ata_tf_read);
5362 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5363 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5364 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5365 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5366 EXPORT_SYMBOL_GPL(ata_check_status);
5367 EXPORT_SYMBOL_GPL(ata_altstatus);
5368 EXPORT_SYMBOL_GPL(ata_exec_command);
5369 EXPORT_SYMBOL_GPL(ata_port_start);
5370 EXPORT_SYMBOL_GPL(ata_port_stop);
5371 EXPORT_SYMBOL_GPL(ata_host_stop);
5372 EXPORT_SYMBOL_GPL(ata_interrupt);
5373 EXPORT_SYMBOL_GPL(ata_qc_prep);
5374 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5375 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5376 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5377 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5378 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5379 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5380 EXPORT_SYMBOL_GPL(ata_port_probe);
5381 EXPORT_SYMBOL_GPL(sata_set_spd);
5382 EXPORT_SYMBOL_GPL(sata_phy_reset);
5383 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5384 EXPORT_SYMBOL_GPL(ata_bus_reset);
5385 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5386 EXPORT_SYMBOL_GPL(ata_std_softreset);
5387 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5388 EXPORT_SYMBOL_GPL(ata_std_postreset);
5389 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5390 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5391 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5392 EXPORT_SYMBOL_GPL(ata_dev_classify);
5393 EXPORT_SYMBOL_GPL(ata_dev_pair);
5394 EXPORT_SYMBOL_GPL(ata_port_disable);
5395 EXPORT_SYMBOL_GPL(ata_ratelimit);
5396 EXPORT_SYMBOL_GPL(ata_wait_register);
5397 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5398 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5399 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5400 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5401 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5402 EXPORT_SYMBOL_GPL(ata_scsi_release);
5403 EXPORT_SYMBOL_GPL(ata_host_intr);
5404 EXPORT_SYMBOL_GPL(sata_scr_valid);
5405 EXPORT_SYMBOL_GPL(sata_scr_read);
5406 EXPORT_SYMBOL_GPL(sata_scr_write);
5407 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5408 EXPORT_SYMBOL_GPL(ata_port_online);
5409 EXPORT_SYMBOL_GPL(ata_port_offline);
5410 EXPORT_SYMBOL_GPL(ata_id_string);
5411 EXPORT_SYMBOL_GPL(ata_id_c_string);
5412 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5413
5414 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5415 EXPORT_SYMBOL_GPL(ata_timing_compute);
5416 EXPORT_SYMBOL_GPL(ata_timing_merge);
5417
5418 #ifdef CONFIG_PCI
5419 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5420 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5421 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5422 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5423 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5424 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5425 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5426 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5427 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5428 #endif /* CONFIG_PCI */
5429
5430 EXPORT_SYMBOL_GPL(ata_device_suspend);
5431 EXPORT_SYMBOL_GPL(ata_device_resume);
5432 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5433 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5434
5435 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5436 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5437 EXPORT_SYMBOL_GPL(ata_port_abort);
5438 EXPORT_SYMBOL_GPL(ata_port_freeze);
5439 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5440 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5441 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5442 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);