[PATCH] libata: add per-dev pio/mwdma/udma_mask
drivers/scsi/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
69
70 static unsigned int ata_unique_id = 1;
71 static struct workqueue_struct *ata_wq;
72
73 int atapi_enabled = 1;
74 module_param(atapi_enabled, int, 0444);
75 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
76
77 int libata_fua = 0;
78 module_param_named(fua, libata_fua, int, 0444);
79 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
80
81 MODULE_AUTHOR("Jeff Garzik");
82 MODULE_DESCRIPTION("Library module for ATA devices");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_VERSION);
85
86
87 /**
88 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
89 * @tf: Taskfile to convert
90 * @fis: Buffer into which data will be output
91 * @pmp: Port multiplier port
92 *
93 * Converts a standard ATA taskfile to a Serial ATA
94 * FIS structure (Register - Host to Device).
95 *
96 * LOCKING:
97 * Inherited from caller.
98 */
99
100 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
101 {
102 fis[0] = 0x27; /* Register - Host to Device FIS */
103 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
104 bit 7 indicates Command FIS */
105 fis[2] = tf->command;
106 fis[3] = tf->feature;
107
108 fis[4] = tf->lbal;
109 fis[5] = tf->lbam;
110 fis[6] = tf->lbah;
111 fis[7] = tf->device;
112
113 fis[8] = tf->hob_lbal;
114 fis[9] = tf->hob_lbam;
115 fis[10] = tf->hob_lbah;
116 fis[11] = tf->hob_feature;
117
118 fis[12] = tf->nsect;
119 fis[13] = tf->hob_nsect;
120 fis[14] = 0;
121 fis[15] = tf->ctl;
122
123 fis[16] = 0;
124 fis[17] = 0;
125 fis[18] = 0;
126 fis[19] = 0;
127 }
128
129 /**
130 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
131 * @fis: Buffer from which data will be input
132 * @tf: Taskfile to output
133 *
134 * Converts a serial ATA FIS structure to a standard ATA taskfile.
135 *
136 * LOCKING:
137 * Inherited from caller.
138 */
139
140 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
141 {
142 tf->command = fis[2]; /* status */
143 tf->feature = fis[3]; /* error */
144
145 tf->lbal = fis[4];
146 tf->lbam = fis[5];
147 tf->lbah = fis[6];
148 tf->device = fis[7];
149
150 tf->hob_lbal = fis[8];
151 tf->hob_lbam = fis[9];
152 tf->hob_lbah = fis[10];
153
154 tf->nsect = fis[12];
155 tf->hob_nsect = fis[13];
156 }
157
158 static const u8 ata_rw_cmds[] = {
159 /* pio multi */
160 ATA_CMD_READ_MULTI,
161 ATA_CMD_WRITE_MULTI,
162 ATA_CMD_READ_MULTI_EXT,
163 ATA_CMD_WRITE_MULTI_EXT,
164 0,
165 0,
166 0,
167 ATA_CMD_WRITE_MULTI_FUA_EXT,
168 /* pio */
169 ATA_CMD_PIO_READ,
170 ATA_CMD_PIO_WRITE,
171 ATA_CMD_PIO_READ_EXT,
172 ATA_CMD_PIO_WRITE_EXT,
173 0,
174 0,
175 0,
176 0,
177 /* dma */
178 ATA_CMD_READ,
179 ATA_CMD_WRITE,
180 ATA_CMD_READ_EXT,
181 ATA_CMD_WRITE_EXT,
182 0,
183 0,
184 0,
185 ATA_CMD_WRITE_FUA_EXT
186 };
187
188 /**
189 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
190 * @qc: command to examine and configure
191 *
192 * Examine the device configuration and tf->flags to calculate
193 * the proper read/write commands and protocol to use.
194 *
195 * LOCKING:
196 * caller.
197 */
198 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
199 {
200 struct ata_taskfile *tf = &qc->tf;
201 struct ata_device *dev = qc->dev;
202 u8 cmd;
203
204 int index, fua, lba48, write;
205
206 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
207 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
208 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
209
210 if (dev->flags & ATA_DFLAG_PIO) {
211 tf->protocol = ATA_PROT_PIO;
212 index = dev->multi_count ? 0 : 8;
213 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
214 /* Unable to use DMA due to host limitation */
215 tf->protocol = ATA_PROT_PIO;
216 index = dev->multi_count ? 0 : 8;
217 } else {
218 tf->protocol = ATA_PROT_DMA;
219 index = 16;
220 }
221
222 cmd = ata_rw_cmds[index + fua + lba48 + write];
223 if (cmd) {
224 tf->command = cmd;
225 return 0;
226 }
227 return -1;
228 }
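/*
 * Worked example (illustrative, not part of the driver): an LBA48 FUA
 * DMA write has fua = 4, lba48 = 2, write = 1 and index = 16, so the
 * lookup above becomes
 *
 *	cmd = ata_rw_cmds[16 + 4 + 2 + 1];	== ATA_CMD_WRITE_FUA_EXT
 *
 * The zero entries in ata_rw_cmds[] mark invalid combinations, e.g.
 * FUA without LBA48 (index 21), for which the function returns -1.
 */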
229
230 /**
231 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
232 * @pio_mask: pio_mask
233 * @mwdma_mask: mwdma_mask
234 * @udma_mask: udma_mask
235 *
236 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
237 * unsigned int xfer_mask.
238 *
239 * LOCKING:
240 * None.
241 *
242 * RETURNS:
243 * Packed xfer_mask.
244 */
245 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
246 unsigned int mwdma_mask,
247 unsigned int udma_mask)
248 {
249 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
250 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
251 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
252 }
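/*
 * Example (illustrative): a device supporting PIO0-4, MWDMA0-2 and
 * UDMA0-5 packs as
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * placing 0x1f at ATA_SHIFT_PIO, 0x07 at ATA_SHIFT_MWDMA and 0x3f at
 * ATA_SHIFT_UDMA, so all three masks travel in one unsigned int.
 */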
253
254 /**
255 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
256 * @xfer_mask: xfer_mask to unpack
257 * @pio_mask: resulting pio_mask
258 * @mwdma_mask: resulting mwdma_mask
259 * @udma_mask: resulting udma_mask
260 *
261 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
262 * Any NULL destination masks will be ignored.
263 */
264 static void ata_unpack_xfermask(unsigned int xfer_mask,
265 unsigned int *pio_mask,
266 unsigned int *mwdma_mask,
267 unsigned int *udma_mask)
268 {
269 if (pio_mask)
270 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
271 if (mwdma_mask)
272 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
273 if (udma_mask)
274 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
275 }
276
277 static const struct ata_xfer_ent {
278 unsigned int shift, bits;
279 u8 base;
280 } ata_xfer_tbl[] = {
281 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
282 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
283 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
284 { -1, },
285 };
286
287 /**
288 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
289 * @xfer_mask: xfer_mask of interest
290 *
291 * Return matching XFER_* value for @xfer_mask. Only the highest
292 * bit of @xfer_mask is considered.
293 *
294 * LOCKING:
295 * None.
296 *
297 * RETURNS:
298 * Matching XFER_* value, 0 if no match found.
299 */
300 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
301 {
302 int highbit = fls(xfer_mask) - 1;
303 const struct ata_xfer_ent *ent;
304
305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
306 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
307 return ent->base + highbit - ent->shift;
308 return 0;
309 }
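/*
 * Example (illustrative): if the highest bit set in @xfer_mask is the
 * UDMA5 bit, highbit == ATA_SHIFT_UDMA + 5, which lands in the UDMA
 * row of ata_xfer_tbl, so the function returns XFER_UDMA_0 + 5, i.e.
 * XFER_UDMA_5.  Any lower bits in the mask are ignored.
 */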
310
311 /**
312 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
313 * @xfer_mode: XFER_* of interest
314 *
315 * Return matching xfer_mask for @xfer_mode.
316 *
317 * LOCKING:
318 * None.
319 *
320 * RETURNS:
321 * Matching xfer_mask, 0 if no match found.
322 */
323 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
324 {
325 const struct ata_xfer_ent *ent;
326
327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return 1 << (ent->shift + xfer_mode - ent->base);
330 return 0;
331 }
332
333 /**
334 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
335 * @xfer_mode: XFER_* of interest
336 *
337 * Return matching xfer_shift for @xfer_mode.
338 *
339 * LOCKING:
340 * None.
341 *
342 * RETURNS:
343 * Matching xfer_shift, -1 if no match found.
344 */
345 static int ata_xfer_mode2shift(unsigned int xfer_mode)
346 {
347 const struct ata_xfer_ent *ent;
348
349 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
350 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
351 return ent->shift;
352 return -1;
353 }
354
355 /**
356 * ata_mode_string - convert xfer_mask to string
357 * @xfer_mask: mask of bits supported; only highest bit counts.
358 *
359 * Determine string which represents the highest speed
360 * (highest bit in @xfer_mask).
361 *
362 * LOCKING:
363 * None.
364 *
365 * RETURNS:
366 * Constant C string representing highest speed listed in
367 * @xfer_mask, or the constant C string "<n/a>".
368 */
369 static const char *ata_mode_string(unsigned int xfer_mask)
370 {
371 static const char * const xfer_mode_str[] = {
372 "PIO0",
373 "PIO1",
374 "PIO2",
375 "PIO3",
376 "PIO4",
377 "MWDMA0",
378 "MWDMA1",
379 "MWDMA2",
380 "UDMA/16",
381 "UDMA/25",
382 "UDMA/33",
383 "UDMA/44",
384 "UDMA/66",
385 "UDMA/100",
386 "UDMA/133",
387 "UDMA7",
388 };
389 int highbit;
390
391 highbit = fls(xfer_mask) - 1;
392 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
393 return xfer_mode_str[highbit];
394 return "<n/a>";
395 }
396
397 /**
398 * ata_pio_devchk - PATA device presence detection
399 * @ap: ATA channel to examine
400 * @device: Device to examine (starting at zero)
401 *
402 * This technique was originally described in
403 * Hale Landis's ATADRVR (www.ata-atapi.com), and
404 * later found its way into the ATA/ATAPI spec.
405 *
406 * Write a pattern to the ATA shadow registers,
407 * and if a device is present, it will respond by
408 * correctly storing and echoing back the
409 * ATA shadow register contents.
410 *
411 * LOCKING:
412 * caller.
413 */
414
415 static unsigned int ata_pio_devchk(struct ata_port *ap,
416 unsigned int device)
417 {
418 struct ata_ioports *ioaddr = &ap->ioaddr;
419 u8 nsect, lbal;
420
421 ap->ops->dev_select(ap, device);
422
423 outb(0x55, ioaddr->nsect_addr);
424 outb(0xaa, ioaddr->lbal_addr);
425
426 outb(0xaa, ioaddr->nsect_addr);
427 outb(0x55, ioaddr->lbal_addr);
428
429 outb(0x55, ioaddr->nsect_addr);
430 outb(0xaa, ioaddr->lbal_addr);
431
432 nsect = inb(ioaddr->nsect_addr);
433 lbal = inb(ioaddr->lbal_addr);
434
435 if ((nsect == 0x55) && (lbal == 0xaa))
436 return 1; /* we found a device */
437
438 return 0; /* nothing found */
439 }
440
441 /**
442 * ata_mmio_devchk - PATA device presence detection
443 * @ap: ATA channel to examine
444 * @device: Device to examine (starting at zero)
445 *
446 * This technique was originally described in
447 * Hale Landis's ATADRVR (www.ata-atapi.com), and
448 * later found its way into the ATA/ATAPI spec.
449 *
450 * Write a pattern to the ATA shadow registers,
451 * and if a device is present, it will respond by
452 * correctly storing and echoing back the
453 * ATA shadow register contents.
454 *
455 * LOCKING:
456 * caller.
457 */
458
459 static unsigned int ata_mmio_devchk(struct ata_port *ap,
460 unsigned int device)
461 {
462 struct ata_ioports *ioaddr = &ap->ioaddr;
463 u8 nsect, lbal;
464
465 ap->ops->dev_select(ap, device);
466
467 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
468 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
469
470 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
471 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
472
473 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
474 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
475
476 nsect = readb((void __iomem *) ioaddr->nsect_addr);
477 lbal = readb((void __iomem *) ioaddr->lbal_addr);
478
479 if ((nsect == 0x55) && (lbal == 0xaa))
480 return 1; /* we found a device */
481
482 return 0; /* nothing found */
483 }
484
485 /**
486 * ata_devchk - PATA device presence detection
487 * @ap: ATA channel to examine
488 * @device: Device to examine (starting at zero)
489 *
490 * Dispatch ATA device presence detection, depending
491 * on whether we are using PIO or MMIO to talk to the
492 * ATA shadow registers.
493 *
494 * LOCKING:
495 * caller.
496 */
497
498 static unsigned int ata_devchk(struct ata_port *ap,
499 unsigned int device)
500 {
501 if (ap->flags & ATA_FLAG_MMIO)
502 return ata_mmio_devchk(ap, device);
503 return ata_pio_devchk(ap, device);
504 }
505
506 /**
507 * ata_dev_classify - determine device type based on ATA-spec signature
508 * @tf: ATA taskfile register set for device to be identified
509 *
510 * Determine from taskfile register contents whether a device is
511 * ATA or ATAPI, as per "Signature and persistence" section
512 * of ATA/PI spec (volume 1, sect 5.14).
513 *
514 * LOCKING:
515 * None.
516 *
517 * RETURNS:
518 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
519 * in the event of failure.
520 */
521
522 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
523 {
524 /* Apple's open source Darwin code hints that some devices only
525 * put a proper signature into the LBA mid/high registers,
526 * so we only check those. It's sufficient for uniqueness.
527 */
528
529 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
530 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
531 DPRINTK("found ATA device by sig\n");
532 return ATA_DEV_ATA;
533 }
534
535 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
536 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
537 DPRINTK("found ATAPI device by sig\n");
538 return ATA_DEV_ATAPI;
539 }
540
541 DPRINTK("unknown device\n");
542 return ATA_DEV_UNKNOWN;
543 }
544
545 /**
546 * ata_dev_try_classify - Parse returned ATA device signature
547 * @ap: ATA channel to examine
548 * @device: Device to examine (starting at zero)
549 * @r_err: Value of error register on completion
550 *
551 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
552 * an ATA/ATAPI-defined set of values is placed in the ATA
553 * shadow registers, indicating the results of device detection
554 * and diagnostics.
555 *
556 * Select the ATA device, and read the values from the ATA shadow
557 * registers. Then parse according to the Error register value,
558 * and the spec-defined values examined by ata_dev_classify().
559 *
560 * LOCKING:
561 * caller.
562 *
563 * RETURNS:
564 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
565 */
566
567 static unsigned int
568 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
569 {
570 struct ata_taskfile tf;
571 unsigned int class;
572 u8 err;
573
574 ap->ops->dev_select(ap, device);
575
576 memset(&tf, 0, sizeof(tf));
577
578 ap->ops->tf_read(ap, &tf);
579 err = tf.feature;
580 if (r_err)
581 *r_err = err;
582
583 /* see if device passed diags */
584 if (err == 1)
585 /* do nothing */ ;
586 else if ((device == 0) && (err == 0x81))
587 /* do nothing */ ;
588 else
589 return ATA_DEV_NONE;
590
591 /* determine if device is ATA or ATAPI */
592 class = ata_dev_classify(&tf);
593
594 if (class == ATA_DEV_UNKNOWN)
595 return ATA_DEV_NONE;
596 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
597 return ATA_DEV_NONE;
598 return class;
599 }
600
601 /**
602 * ata_id_string - Convert IDENTIFY DEVICE page into string
603 * @id: IDENTIFY DEVICE results we will examine
604 * @s: string into which data is output
605 * @ofs: offset into identify device page
606 * @len: length of string to return. must be an even number.
607 *
608 * The strings in the IDENTIFY DEVICE page are broken up into
609 * 16-bit chunks. Run through the string, and output each
610 * 8-bit chunk linearly, regardless of platform.
611 *
612 * LOCKING:
613 * caller.
614 */
615
616 void ata_id_string(const u16 *id, unsigned char *s,
617 unsigned int ofs, unsigned int len)
618 {
619 unsigned int c;
620
621 while (len > 0) {
622 c = id[ofs] >> 8;
623 *s = c;
624 s++;
625
626 c = id[ofs] & 0xff;
627 *s = c;
628 s++;
629
630 ofs++;
631 len -= 2;
632 }
633 }
634
635 /**
636 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
637 * @id: IDENTIFY DEVICE results we will examine
638 * @s: string into which data is output
639 * @ofs: offset into identify device page
640 * @len: length of string to return. must be an odd number.
641 *
642 * This function is identical to ata_id_string except that it
643 * trims trailing spaces and terminates the resulting string with
644 * null. @len must be actual maximum length (even number) + 1.
645 *
646 * LOCKING:
647 * caller.
648 */
649 void ata_id_c_string(const u16 *id, unsigned char *s,
650 unsigned int ofs, unsigned int len)
651 {
652 unsigned char *p;
653
654 WARN_ON(!(len & 1));
655
656 ata_id_string(id, s, ofs, len - 1);
657
658 p = s + strnlen(s, len - 1);
659 while (p > s && p[-1] == ' ')
660 p--;
661 *p = '\0';
662 }
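/*
 * Typical use (illustrative sketch): extract the 40-character model
 * field into a trimmed, NUL-terminated buffer.  ATA_ID_PROD_OFS is
 * the model-string word offset from <linux/ata.h>; the odd buffer
 * size satisfies the WARN_ON(!(len & 1)) check above:
 *
 *	unsigned char model[41];
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD_OFS, sizeof(model));
 */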
663
664 static u64 ata_id_n_sectors(const u16 *id)
665 {
666 if (ata_id_has_lba(id)) {
667 if (ata_id_has_lba48(id))
668 return ata_id_u64(id, 100);
669 else
670 return ata_id_u32(id, 60);
671 } else {
672 if (ata_id_current_chs_valid(id))
673 return ata_id_u32(id, 57);
674 else
675 return id[1] * id[3] * id[6];
676 }
677 }
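/*
 * Worked example (illustrative): a CHS-only drive reporting the
 * classic default geometry id[1] = 16383, id[3] = 16, id[6] = 63
 * yields 16383 * 16 * 63 = 16514064 sectors, the familiar ~8.4 GB
 * pre-LBA addressing ceiling.
 */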
678
679 /**
680 * ata_noop_dev_select - Select device 0/1 on ATA bus
681 * @ap: ATA channel to manipulate
682 * @device: ATA device (numbered from zero) to select
683 *
684 * This function performs no action; it is a no-op.
685 *
686 * May be used as the dev_select() entry in ata_port_operations.
687 *
688 * LOCKING:
689 * caller.
690 */
691 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
692 {
693 }
694
695
696 /**
697 * ata_std_dev_select - Select device 0/1 on ATA bus
698 * @ap: ATA channel to manipulate
699 * @device: ATA device (numbered from zero) to select
700 *
701 * Use the method defined in the ATA specification to
702 * make either device 0, or device 1, active on the
703 * ATA channel. Works with both PIO and MMIO.
704 *
705 * May be used as the dev_select() entry in ata_port_operations.
706 *
707 * LOCKING:
708 * caller.
709 */
710
711 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
712 {
713 u8 tmp;
714
715 if (device == 0)
716 tmp = ATA_DEVICE_OBS;
717 else
718 tmp = ATA_DEVICE_OBS | ATA_DEV1;
719
720 if (ap->flags & ATA_FLAG_MMIO) {
721 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
722 } else {
723 outb(tmp, ap->ioaddr.device_addr);
724 }
725 ata_pause(ap); /* needed; also flushes, for mmio */
726 }
727
728 /**
729 * ata_dev_select - Select device 0/1 on ATA bus
730 * @ap: ATA channel to manipulate
731 * @device: ATA device (numbered from zero) to select
732 * @wait: non-zero to wait for Status register BSY bit to clear
733 * @can_sleep: non-zero if context allows sleeping
734 *
735 * Use the method defined in the ATA specification to
736 * make either device 0, or device 1, active on the
737 * ATA channel.
738 *
739 * This is a high-level version of ata_std_dev_select(),
740 * which additionally provides the services of inserting
741 * the proper pauses and status polling, where needed.
742 *
743 * LOCKING:
744 * caller.
745 */
746
747 void ata_dev_select(struct ata_port *ap, unsigned int device,
748 unsigned int wait, unsigned int can_sleep)
749 {
750 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
751 ap->id, device, wait);
752
753 if (wait)
754 ata_wait_idle(ap);
755
756 ap->ops->dev_select(ap, device);
757
758 if (wait) {
759 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
760 msleep(150);
761 ata_wait_idle(ap);
762 }
763 }
764
765 /**
766 * ata_dump_id - IDENTIFY DEVICE info debugging output
767 * @id: IDENTIFY DEVICE page to dump
768 *
769 * Dump selected 16-bit words from the given IDENTIFY DEVICE
770 * page.
771 *
772 * LOCKING:
773 * caller.
774 */
775
776 static inline void ata_dump_id(const u16 *id)
777 {
778 DPRINTK("49==0x%04x "
779 "53==0x%04x "
780 "63==0x%04x "
781 "64==0x%04x "
782 "75==0x%04x \n",
783 id[49],
784 id[53],
785 id[63],
786 id[64],
787 id[75]);
788 DPRINTK("80==0x%04x "
789 "81==0x%04x "
790 "82==0x%04x "
791 "83==0x%04x "
792 "84==0x%04x \n",
793 id[80],
794 id[81],
795 id[82],
796 id[83],
797 id[84]);
798 DPRINTK("88==0x%04x "
799 "93==0x%04x\n",
800 id[88],
801 id[93]);
802 }
803
804 /**
805 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
806 * @id: IDENTIFY data to compute xfer mask from
807 *
808 * Compute the xfermask for this device. This is not as trivial
809 * as it seems if we must consider early devices correctly.
810 *
811 * FIXME: pre IDE drive timing (do we care ?).
812 *
813 * LOCKING:
814 * None.
815 *
816 * RETURNS:
817 * Computed xfermask
818 */
819 static unsigned int ata_id_xfermask(const u16 *id)
820 {
821 unsigned int pio_mask, mwdma_mask, udma_mask;
822
823 /* Usual case. Word 53 indicates word 64 is valid */
824 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
825 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
826 pio_mask <<= 3;
827 pio_mask |= 0x7;
828 } else {
829 /* If word 64 isn't valid then Word 51 high byte holds
830 * the PIO timing number for the maximum. Turn it into
831 * a mask.
832 */
833 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
834
835 /* But wait.. there's more. Design your standards by
836 * committee and you too can get a free iordy field to
837 * process. However, it's the speeds, not the modes, that
838 * are supported... Note that drivers using the timing API
839 * will get this right anyway.
840 */
841 }
842
843 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
844
845 udma_mask = 0;
846 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
847 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
848
849 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
850 }
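/*
 * Worked example (illustrative): a drive with word 53 bits 1 and 2
 * set reporting id[ATA_ID_PIO_MODES] = 0x0003 (PIO3/4),
 * id[ATA_ID_MWDMA_MODES] = 0x0007 and id[ATA_ID_UDMA_MODES] = 0x003f
 * comes out as pio_mask = 0x1f (PIO0-4 implied), mwdma_mask = 0x07
 * and udma_mask = 0x3f before packing.
 */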
851
852 /**
853 * ata_port_queue_task - Queue port_task
855 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value passed to workqueue function
 * @delay: delay time for workqueue function
855 *
856 * Schedule @fn(@data) for execution after @delay jiffies using
857 * port_task. There is one port_task per port and it's the
858 * user(low level driver)'s responsibility to make sure that only
859 * one task is active at any given time.
860 *
861 * libata core layer takes care of synchronization between
862 * port_task and EH. ata_port_queue_task() may be ignored for EH
863 * synchronization.
864 *
865 * LOCKING:
866 * Inherited from caller.
867 */
868 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
869 unsigned long delay)
870 {
871 int rc;
872
873 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
874 return;
875
876 PREPARE_WORK(&ap->port_task, fn, data);
877
878 if (!delay)
879 rc = queue_work(ata_wq, &ap->port_task);
880 else
881 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
882
883 /* rc == 0 means that another user is using port task */
884 WARN_ON(rc == 0);
885 }
886
887 /**
888 * ata_port_flush_task - Flush port_task
889 * @ap: The ata_port to flush port_task for
890 *
891 * After this function completes, port_task is guaranteed not to
892 * be running or scheduled.
893 *
894 * LOCKING:
895 * Kernel thread context (may sleep)
896 */
897 void ata_port_flush_task(struct ata_port *ap)
898 {
899 unsigned long flags;
900
901 DPRINTK("ENTER\n");
902
903 spin_lock_irqsave(&ap->host_set->lock, flags);
904 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
905 spin_unlock_irqrestore(&ap->host_set->lock, flags);
906
907 DPRINTK("flush #1\n");
908 flush_workqueue(ata_wq);
909
910 /*
911 * At this point, if a task is running, it's guaranteed to see
912 * the FLUSH flag; thus, it will never queue pio tasks again.
913 * Cancel and flush.
914 */
915 if (!cancel_delayed_work(&ap->port_task)) {
916 DPRINTK("flush #2\n");
917 flush_workqueue(ata_wq);
918 }
919
920 spin_lock_irqsave(&ap->host_set->lock, flags);
921 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
922 spin_unlock_irqrestore(&ap->host_set->lock, flags);
923
924 DPRINTK("EXIT\n");
925 }
926
927 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
928 {
929 struct completion *waiting = qc->private_data;
930
931 qc->ap->ops->tf_read(qc->ap, &qc->tf);
932 complete(waiting);
933 }
934
935 /**
936 * ata_exec_internal - execute libata internal command
937 * @ap: Port to which the command is sent
938 * @dev: Device to which the command is sent
939 * @tf: Taskfile registers for the command and the result
940 * @dma_dir: Data transfer direction of the command
941 * @buf: Data buffer of the command
942 * @buflen: Length of data buffer
943 *
944 * Executes libata internal command with timeout. @tf contains
945 * command on entry and result on return. Timeout and error
946 * conditions are reported via return value. No recovery action
947 * is taken after a command times out. It's caller's duty to
948 * clean up after timeout.
949 *
950 * LOCKING:
951 * None. Should be called with kernel context, might sleep.
952 */
953
954 static unsigned
955 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
956 struct ata_taskfile *tf,
957 int dma_dir, void *buf, unsigned int buflen)
958 {
959 u8 command = tf->command;
960 struct ata_queued_cmd *qc;
961 DECLARE_COMPLETION(wait);
962 unsigned long flags;
963 unsigned int err_mask;
964
965 spin_lock_irqsave(&ap->host_set->lock, flags);
966
967 qc = ata_qc_new_init(ap, dev);
968 BUG_ON(qc == NULL);
969
970 qc->tf = *tf;
971 qc->dma_dir = dma_dir;
972 if (dma_dir != DMA_NONE) {
973 ata_sg_init_one(qc, buf, buflen);
974 qc->nsect = buflen / ATA_SECT_SIZE;
975 }
976
977 qc->private_data = &wait;
978 qc->complete_fn = ata_qc_complete_internal;
979
980 qc->err_mask = ata_qc_issue(qc);
981 if (qc->err_mask)
982 ata_qc_complete(qc);
983
984 spin_unlock_irqrestore(&ap->host_set->lock, flags);
985
986 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
987 ata_port_flush_task(ap);
988
989 spin_lock_irqsave(&ap->host_set->lock, flags);
990
991 /* We're racing with irq here. If we lose, the
992 * following test prevents us from completing the qc
993 * again. If completion irq occurs after here but
994 * before the caller cleans up, it will result in a
995 * spurious interrupt. We can live with that.
996 */
997 if (qc->flags & ATA_QCFLAG_ACTIVE) {
998 qc->err_mask = AC_ERR_TIMEOUT;
999 ata_qc_complete(qc);
1000 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1001 ap->id, command);
1002 }
1003
1004 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1005 }
1006
1007 *tf = qc->tf;
1008 err_mask = qc->err_mask;
1009
1010 ata_qc_free(qc);
1011
1012 return err_mask;
1013 }
1014
1015 /**
1016 * ata_pio_need_iordy - check if iordy needed
1017 * @adev: ATA device
1018 *
1019 * Check if the current speed of the device requires IORDY. Used
1020 * by various controllers for chip configuration.
1021 */
1022
1023 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1024 {
1025 int pio;
1026 int speed = adev->pio_mode - XFER_PIO_0;
1027
1028 if (speed < 2)
1029 return 0;
1030 if (speed > 2)
1031 return 1;
1032
1033 /* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1034
1035 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1036 pio = adev->id[ATA_ID_EIDE_PIO];
1037 /* Is the speed faster than the drive allows non IORDY ? */
1038 if (pio) {
1039 /* This is cycle times not frequency - watch the logic! */
1040 if (pio > 240) /* PIO2 is 240nS per cycle */
1041 return 1;
1042 return 0;
1043 }
1044 }
1045 return 0;
1046 }
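/*
 * Example (illustrative): a drive running PIO2 that reports a 383 ns
 * non-IORDY cycle time in id[ATA_ID_EIDE_PIO] needs IORDY, since
 * 383 > 240 ns; one reporting 240 ns or less does not.  PIO3 and
 * above always need IORDY here, PIO0/1 never do.
 */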
1047
1048 /**
1049 * ata_dev_read_id - Read ID data from the specified device
1050 * @ap: port on which target device resides
1051 * @dev: target device
1052 * @p_class: pointer to class of the target device (may be changed)
1053 * @post_reset: is this read ID post-reset?
1054 * @p_id: read IDENTIFY page (newly allocated)
1055 *
1056 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1057 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1058 * devices. This function also takes care of EDD signature
1059 * misreporting (to be removed once EDD support is gone) and
1060 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1061 *
1062 * LOCKING:
1063 * Kernel thread context (may sleep)
1064 *
1065 * RETURNS:
1066 * 0 on success, -errno otherwise.
1067 */
1068 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1069 unsigned int *p_class, int post_reset, u16 **p_id)
1070 {
1071 unsigned int class = *p_class;
1072 unsigned int using_edd;
1073 struct ata_taskfile tf;
1074 unsigned int err_mask = 0;
1075 u16 *id;
1076 const char *reason;
1077 int rc;
1078
1079 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1080
1081 if (ap->ops->probe_reset ||
1082 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1083 using_edd = 0;
1084 else
1085 using_edd = 1;
1086
1087 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1088
1089 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1090 if (id == NULL) {
1091 rc = -ENOMEM;
1092 reason = "out of memory";
1093 goto err_out;
1094 }
1095
1096 retry:
1097 ata_tf_init(ap, &tf, dev->devno);
1098
1099 switch (class) {
1100 case ATA_DEV_ATA:
1101 tf.command = ATA_CMD_ID_ATA;
1102 break;
1103 case ATA_DEV_ATAPI:
1104 tf.command = ATA_CMD_ID_ATAPI;
1105 break;
1106 default:
1107 rc = -ENODEV;
1108 reason = "unsupported class";
1109 goto err_out;
1110 }
1111
1112 tf.protocol = ATA_PROT_PIO;
1113
1114 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1115 id, sizeof(id[0]) * ATA_ID_WORDS);
1116
1117 if (err_mask) {
1118 rc = -EIO;
1119 reason = "I/O error";
1120
1121 if (err_mask & ~AC_ERR_DEV)
1122 goto err_out;
1123
1124 /*
1125 * arg! EDD works for all test cases, but seems to return
1126 * the ATA signature for some ATAPI devices. Until the
1127 * reason for this is found and fixed, we fix up the mess
1128 * here. If IDENTIFY DEVICE returns command aborted
1129 * (as ATAPI devices do), then we issue an
1130 * IDENTIFY PACKET DEVICE.
1131 *
1132 * ATA software reset (SRST, the default) does not appear
1133 * to have this problem.
1134 */
1135 if ((using_edd) && (class == ATA_DEV_ATA)) {
1136 u8 err = tf.feature;
1137 if (err & ATA_ABORTED) {
1138 class = ATA_DEV_ATAPI;
1139 goto retry;
1140 }
1141 }
1142 goto err_out;
1143 }
1144
1145 swap_buf_le16(id, ATA_ID_WORDS);
1146
1147 /* sanity check */
1148 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1149 rc = -EINVAL;
1150 reason = "device reports illegal type";
1151 goto err_out;
1152 }
1153
1154 if (post_reset && class == ATA_DEV_ATA) {
1155 /*
1156 * The exact sequence expected by certain pre-ATA4 drives is:
1157 * SRST RESET
1158 * IDENTIFY
1159 * INITIALIZE DEVICE PARAMETERS
1160 * anything else..
1161 * Some drives were very specific about that exact sequence.
1162 */
1163 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1164 err_mask = ata_dev_init_params(ap, dev);
1165 if (err_mask) {
1166 rc = -EIO;
1167 reason = "INIT_DEV_PARAMS failed";
1168 goto err_out;
1169 }
1170
1171 /* current CHS translation info (id[53-58]) might be
1172 * changed. reread the identify device info.
1173 */
1174 post_reset = 0;
1175 goto retry;
1176 }
1177 }
1178
1179 *p_class = class;
1180 *p_id = id;
1181 return 0;
1182
1183 err_out:
1184 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1185 ap->id, dev->devno, reason);
1186 kfree(id);
1187 return rc;
1188 }
1189
1190 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1191 struct ata_device *dev)
1192 {
1193 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1194 }
1195
1196 /**
1197 * ata_dev_configure - Configure the specified ATA/ATAPI device
1198 * @ap: Port on which target device resides
1199 * @dev: Target device to configure
1200 * @print_info: Enable device info printout
1201 *
1202 * Configure @dev according to @dev->id. Generic and low-level
1203 * driver specific fixups are also applied.
1204 *
1205 * LOCKING:
1206 * Kernel thread context (may sleep)
1207 *
1208 * RETURNS:
1209 * 0 on success, -errno otherwise
1210 */
1211 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1212 int print_info)
1213 {
1214 const u16 *id = dev->id;
1215 unsigned int xfer_mask;
1216 int i, rc;
1217
1218 if (!ata_dev_present(dev)) {
1219 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1220 ap->id, dev->devno);
1221 return 0;
1222 }
1223
1224 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1225
1226 /* print device capabilities */
1227 if (print_info)
1228 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1229 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1230 ap->id, dev->devno, id[49], id[82], id[83],
1231 id[84], id[85], id[86], id[87], id[88]);
1232
1233 /* initialize to-be-configured parameters */
1234 dev->flags = 0;
1235 dev->max_sectors = 0;
1236 dev->cdb_len = 0;
1237 dev->n_sectors = 0;
1238 dev->cylinders = 0;
1239 dev->heads = 0;
1240 dev->sectors = 0;
1241
1242 /*
1243 * common ATA, ATAPI feature tests
1244 */
1245
1246 /* find max transfer mode; for printk only */
1247 xfer_mask = ata_id_xfermask(id);
1248
1249 ata_dump_id(id);
1250
1251 /* ATA-specific feature tests */
1252 if (dev->class == ATA_DEV_ATA) {
1253 dev->n_sectors = ata_id_n_sectors(id);
1254
1255 if (ata_id_has_lba(id)) {
1256 const char *lba_desc;
1257
1258 lba_desc = "LBA";
1259 dev->flags |= ATA_DFLAG_LBA;
1260 if (ata_id_has_lba48(id)) {
1261 dev->flags |= ATA_DFLAG_LBA48;
1262 lba_desc = "LBA48";
1263 }
1264
1265 /* print device info to dmesg */
1266 if (print_info)
1267 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1268 "max %s, %Lu sectors: %s\n",
1269 ap->id, dev->devno,
1270 ata_id_major_version(id),
1271 ata_mode_string(xfer_mask),
1272 (unsigned long long)dev->n_sectors,
1273 lba_desc);
1274 } else {
1275 /* CHS */
1276
1277 /* Default translation */
1278 dev->cylinders = id[1];
1279 dev->heads = id[3];
1280 dev->sectors = id[6];
1281
1282 if (ata_id_current_chs_valid(id)) {
1283 /* Current CHS translation is valid. */
1284 dev->cylinders = id[54];
1285 dev->heads = id[55];
1286 dev->sectors = id[56];
1287 }
1288
1289 /* print device info to dmesg */
1290 if (print_info)
1291 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1292 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1293 ap->id, dev->devno,
1294 ata_id_major_version(id),
1295 ata_mode_string(xfer_mask),
1296 (unsigned long long)dev->n_sectors,
1297 dev->cylinders, dev->heads, dev->sectors);
1298 }
1299
1300 dev->cdb_len = 16;
1301 }
1302
1303 /* ATAPI-specific feature tests */
1304 else if (dev->class == ATA_DEV_ATAPI) {
1305 rc = atapi_cdb_len(id);
1306 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1307 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1308 rc = -EINVAL;
1309 goto err_out_nosup;
1310 }
1311 dev->cdb_len = (unsigned int) rc;
1312
1313 /* print device info to dmesg */
1314 if (print_info)
1315 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1316 ap->id, dev->devno, ata_mode_string(xfer_mask));
1317 }
1318
1319 ap->host->max_cmd_len = 0;
1320 for (i = 0; i < ATA_MAX_DEVICES; i++)
1321 ap->host->max_cmd_len = max_t(unsigned int,
1322 ap->host->max_cmd_len,
1323 ap->device[i].cdb_len);
1324
1325 /* limit bridge transfers to udma5, 200 sectors */
1326 if (ata_dev_knobble(ap, dev)) {
1327 if (print_info)
1328 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1329 ap->id, dev->devno);
1330 ap->udma_mask &= ATA_UDMA5;
1331 dev->max_sectors = ATA_MAX_SECTORS;
1332 }
1333
1334 if (ap->ops->dev_config)
1335 ap->ops->dev_config(ap, dev);
1336
1337 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1338 return 0;
1339
1340 err_out_nosup:
1341 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1342 ap->id, dev->devno);
1343 DPRINTK("EXIT, err\n");
1344 return rc;
1345 }
1346
1347 /**
1348 * ata_bus_probe - Reset and probe ATA bus
1349 * @ap: Bus to probe
1350 *
1351 * Master ATA bus probing function. Initiates a hardware-dependent
1352 * bus reset, then attempts to identify any devices found on
1353 * the bus.
1354 *
1355 * LOCKING:
1356 * PCI/etc. bus probe sem.
1357 *
1358 * RETURNS:
1359 * Zero on success, non-zero on error.
1360 */
1361
1362 static int ata_bus_probe(struct ata_port *ap)
1363 {
1364 unsigned int classes[ATA_MAX_DEVICES];
1365 unsigned int i, rc, found = 0;
1366
1367 ata_port_probe(ap);
1368
1369 /* reset and determine device classes */
1370 for (i = 0; i < ATA_MAX_DEVICES; i++)
1371 classes[i] = ATA_DEV_UNKNOWN;
1372
1373 if (ap->ops->probe_reset) {
1374 rc = ap->ops->probe_reset(ap, classes);
1375 if (rc) {
1376 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1377 return rc;
1378 }
1379 } else {
1380 ap->ops->phy_reset(ap);
1381
1382 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1383 for (i = 0; i < ATA_MAX_DEVICES; i++)
1384 classes[i] = ap->device[i].class;
1385
1386 ata_port_probe(ap);
1387 }
1388
1389 for (i = 0; i < ATA_MAX_DEVICES; i++)
1390 if (classes[i] == ATA_DEV_UNKNOWN)
1391 classes[i] = ATA_DEV_NONE;
1392
1393 /* read IDENTIFY page and configure devices */
1394 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1395 struct ata_device *dev = &ap->device[i];
1396
1397 dev->class = classes[i];
1398
1399 if (!ata_dev_present(dev))
1400 continue;
1401
1402 WARN_ON(dev->id != NULL);
1403 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1404 dev->class = ATA_DEV_NONE;
1405 continue;
1406 }
1407
1408 if (ata_dev_configure(ap, dev, 1)) {
1409 dev->class++; /* disable device */
1410 continue;
1411 }
1412
1413 found = 1;
1414 }
1415
1416 if (!found)
1417 goto err_out_disable;
1418
1419 ata_set_mode(ap);
1420 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1421 goto err_out_disable;
1422
1423 return 0;
1424
1425 err_out_disable:
1426 ap->ops->port_disable(ap);
1427 return -1;
1428 }
1429
1430 /**
1431 * ata_port_probe - Mark port as enabled
1432 * @ap: Port for which we indicate enablement
1433 *
1434 * Modify @ap data structure such that the system
1435 * thinks that the entire port is enabled.
1436 *
1437 * LOCKING: host_set lock, or some other form of
1438 * serialization.
1439 */
1440
1441 void ata_port_probe(struct ata_port *ap)
1442 {
1443 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1444 }
1445
1446 /**
1447 * sata_print_link_status - Print SATA link status
1448 * @ap: SATA port to printk link status about
1449 *
1450 * This function prints link speed and status of a SATA link.
1451 *
1452 * LOCKING:
1453 * None.
1454 */
1455 static void sata_print_link_status(struct ata_port *ap)
1456 {
1457 u32 sstatus, tmp;
1458 const char *speed;
1459
1460 if (!ap->ops->scr_read)
1461 return;
1462
1463 sstatus = scr_read(ap, SCR_STATUS);
1464
1465 if (sata_dev_present(ap)) {
1466 tmp = (sstatus >> 4) & 0xf;
1467 if (tmp & (1 << 0))
1468 speed = "1.5";
1469 else if (tmp & (1 << 1))
1470 speed = "3.0";
1471 else
1472 speed = "<unknown>";
1473 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1474 ap->id, speed, sstatus);
1475 } else {
1476 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1477 ap->id, sstatus);
1478 }
1479 }
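/*
 * Example (illustrative): SStatus 0x113 has DET = 3 (device present,
 * phy communication established) and SPD = 1, so the code above
 * prints "SATA link up 1.5 Gbps (SStatus 113)"; SPD = 2 would report
 * 3.0 Gbps instead.
 */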
1480
1481 /**
1482 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1483 * @ap: SATA port associated with target SATA PHY.
1484 *
1485 * This function issues commands to standard SATA Sxxx
1486 * PHY registers, to wake up the phy (and device), and
1487 * clear any reset condition.
1488 *
1489 * LOCKING:
1490 * PCI/etc. bus probe sem.
1491 *
1492 */
1493 void __sata_phy_reset(struct ata_port *ap)
1494 {
1495 u32 sstatus;
1496 unsigned long timeout = jiffies + (HZ * 5);
1497
1498 if (ap->flags & ATA_FLAG_SATA_RESET) {
1499 /* issue phy wake/reset */
1500 scr_write_flush(ap, SCR_CONTROL, 0x301);
1501 /* Couldn't find anything in SATA I/II specs, but
1502 * AHCI-1.1 10.4.2 says at least 1 ms. */
1503 mdelay(1);
1504 }
1505 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1506
1507 /* wait for phy to become ready, if necessary */
1508 do {
1509 msleep(200);
1510 sstatus = scr_read(ap, SCR_STATUS);
1511 if ((sstatus & 0xf) != 1)
1512 break;
1513 } while (time_before(jiffies, timeout));
1514
1515 /* print link status */
1516 sata_print_link_status(ap);
1517
1518 /* TODO: phy layer with polling, timeouts, etc. */
1519 if (sata_dev_present(ap))
1520 ata_port_probe(ap);
1521 else
1522 ata_port_disable(ap);
1523
1524 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1525 return;
1526
1527 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1528 ata_port_disable(ap);
1529 return;
1530 }
1531
1532 ap->cbl = ATA_CBL_SATA;
1533 }
1534
1535 /**
1536 * sata_phy_reset - Reset SATA bus.
1537 * @ap: SATA port associated with target SATA PHY.
1538 *
1539 * This function resets the SATA bus, and then probes
1540 * the bus for devices.
1541 *
1542 * LOCKING:
1543 * PCI/etc. bus probe sem.
1544 *
1545 */
1546 void sata_phy_reset(struct ata_port *ap)
1547 {
1548 __sata_phy_reset(ap);
1549 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1550 return;
1551 ata_bus_reset(ap);
1552 }
1553
1554 /**
1555 * ata_port_disable - Disable port.
1556 * @ap: Port to be disabled.
1557 *
1558 * Modify @ap data structure such that the system
1559 * thinks that the entire port is disabled, and should
1560 * never attempt to probe or communicate with devices
1561 * on this port.
1562 *
1563 * LOCKING: host_set lock, or some other form of
1564 * serialization.
1565 */
1566
1567 void ata_port_disable(struct ata_port *ap)
1568 {
1569 ap->device[0].class = ATA_DEV_NONE;
1570 ap->device[1].class = ATA_DEV_NONE;
1571 ap->flags |= ATA_FLAG_PORT_DISABLED;
1572 }
1573
1574 /*
1575 * This mode timing computation functionality is ported over from
1576 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1577 */
1578 /*
1579 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1580 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1581 * for PIO 5, which is a nonstandard extension and UDMA6, which
1582 * is currently supported only by Maxtor drives.
1583 */
1584
1585 static const struct ata_timing ata_timing[] = {
1586
1587 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1588 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1589 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1590 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1591
1592 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1593 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1594 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1595
1596 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1597
1598 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1599 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1600 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1601
1602 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1603 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1604 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1605
1606 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1607 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1608 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1609
1610 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1611 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1612 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1613
1614 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1615
1616 { 0xFF }
1617 };
1618
1619 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1620 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1621
1622 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1623 {
1624 q->setup = EZ(t->setup * 1000, T);
1625 q->act8b = EZ(t->act8b * 1000, T);
1626 q->rec8b = EZ(t->rec8b * 1000, T);
1627 q->cyc8b = EZ(t->cyc8b * 1000, T);
1628 q->active = EZ(t->active * 1000, T);
1629 q->recover = EZ(t->recover * 1000, T);
1630 q->cycle = EZ(t->cycle * 1000, T);
1631 q->udma = EZ(t->udma * 1000, UT);
1632 }
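/*
 * ENOUGH() divides rounding up and EZ() maps an unspecified value of
 * 0 to 0.  Worked example (illustrative): quantizing the 215 ns
 * MWDMA0 active time to a 33 MHz clock (T = 1000000000 / 33333, i.e.
 * ~30000 ps per tick, as PATA drivers typically pass it) gives
 * ENOUGH(215 * 1000, 30000) = 8 clocks, where a plain divide would
 * truncate to 7 and violate the timing.
 */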
1633
1634 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1635 struct ata_timing *m, unsigned int what)
1636 {
1637 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1638 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1639 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1640 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1641 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1642 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1643 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1644 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1645 }
1646
1647 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1648 {
1649 const struct ata_timing *t;
1650
1651 for (t = ata_timing; t->mode != speed; t++)
1652 if (t->mode == 0xFF)
1653 return NULL;
1654 return t;
1655 }
1656
1657 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1658 struct ata_timing *t, int T, int UT)
1659 {
1660 const struct ata_timing *s;
1661 struct ata_timing p;
1662
1663 /*
1664 * Find the mode.
1665 */
1666
1667 if (!(s = ata_timing_find_mode(speed)))
1668 return -EINVAL;
1669
1670 memcpy(t, s, sizeof(*s));
1671
1672 /*
1673 * If the drive is an EIDE drive, it can tell us it needs extended
1674 * PIO/MW_DMA cycle timing.
1675 */
1676
1677 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1678 memset(&p, 0, sizeof(p));
1679 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1680 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1681 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1682 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1683 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1684 }
1685 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1686 }
1687
1688 /*
1689 * Convert the timing to bus clock counts.
1690 */
1691
1692 ata_timing_quantize(t, t, T, UT);
1693
1694 /*
1695 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1696 * S.M.A.R.T. and some other commands. We have to ensure that the
1697 * DMA cycle timing is slower than or equal to the fastest PIO timing.
1698 */
1699
1700 if (speed > XFER_PIO_4) {
1701 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1702 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1703 }
1704
1705 /*
1706 * Lengthen active & recovery time so that cycle time is correct.
1707 */
1708
1709 if (t->act8b + t->rec8b < t->cyc8b) {
1710 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1711 t->rec8b = t->cyc8b - t->act8b;
1712 }
1713
1714 if (t->active + t->recover < t->cycle) {
1715 t->active += (t->cycle - (t->active + t->recover)) / 2;
1716 t->recover = t->cycle - t->active;
1717 }
1718
1719 return 0;
1720 }
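/*
 * Example of the final stretch step (illustrative): if quantization
 * left active = 4 and recover = 5 against cycle = 11 clocks, the code
 * above grows active by (11 - 9) / 2 = 1 and sets
 * recover = 11 - 5 = 6, so active + recover again fills the whole
 * cycle.
 */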
1721
1722 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1723 {
1724 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1725 return;
1726
1727 if (dev->xfer_shift == ATA_SHIFT_PIO)
1728 dev->flags |= ATA_DFLAG_PIO;
1729
1730 ata_dev_set_xfermode(ap, dev);
1731
1732 if (ata_dev_revalidate(ap, dev, 0)) {
1733 printk(KERN_ERR "ata%u: failed to revalidate after set "
1734 "xfermode, disabled\n", ap->id);
1735 ata_port_disable(ap);
1736 }
1737
1738 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1739 dev->xfer_shift, (int)dev->xfer_mode);
1740
1741 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1742 ap->id, dev->devno,
1743 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1744 }
1745
1746 static int ata_host_set_pio(struct ata_port *ap)
1747 {
1748 int i;
1749
1750 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1751 struct ata_device *dev = &ap->device[i];
1752
1753 if (!ata_dev_present(dev))
1754 continue;
1755
1756 if (!dev->pio_mode) {
1757 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1758 return -1;
1759 }
1760
1761 dev->xfer_mode = dev->pio_mode;
1762 dev->xfer_shift = ATA_SHIFT_PIO;
1763 if (ap->ops->set_piomode)
1764 ap->ops->set_piomode(ap, dev);
1765 }
1766
1767 return 0;
1768 }
1769
1770 static void ata_host_set_dma(struct ata_port *ap)
1771 {
1772 int i;
1773
1774 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1775 struct ata_device *dev = &ap->device[i];
1776
1777 if (!ata_dev_present(dev) || !dev->dma_mode)
1778 continue;
1779
1780 dev->xfer_mode = dev->dma_mode;
1781 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1782 if (ap->ops->set_dmamode)
1783 ap->ops->set_dmamode(ap, dev);
1784 }
1785 }
1786
1787 /**
1788 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1789 * @ap: port on which timings will be programmed
1790 *
1791 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1792 *
1793 * LOCKING:
1794 * PCI/etc. bus probe sem.
1795 */
1796 static void ata_set_mode(struct ata_port *ap)
1797 {
1798 int i, rc;
1799
1800 /* step 1: calculate xfer_mask */
1801 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1802 struct ata_device *dev = &ap->device[i];
1803 unsigned int pio_mask, dma_mask;
1804
1805 if (!ata_dev_present(dev))
1806 continue;
1807
1808 ata_dev_xfermask(ap, dev);
1809
1810 /* TODO: let LLDD filter dev->*_mask here */
1811
1812 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1813 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1814 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1815 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1816 }
1817
1818 /* step 2: always set host PIO timings */
1819 rc = ata_host_set_pio(ap);
1820 if (rc)
1821 goto err_out;
1822
1823 /* step 3: set host DMA timings */
1824 ata_host_set_dma(ap);
1825
1826 /* step 4: update devices' xfer mode */
1827 for (i = 0; i < ATA_MAX_DEVICES; i++)
1828 ata_dev_set_mode(ap, &ap->device[i]);
1829
1830 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1831 return;
1832
1833 if (ap->ops->post_set_mode)
1834 ap->ops->post_set_mode(ap);
1835
1836 return;
1837
1838 err_out:
1839 ata_port_disable(ap);
1840 }
1841
1842 /**
1843 * ata_tf_to_host - issue ATA taskfile to host controller
1844 * @ap: port to which command is being issued
1845 * @tf: ATA taskfile register set
1846 *
1847 * Issues ATA taskfile register set to ATA host controller,
1848 * with proper synchronization with interrupt handler and
1849 * other threads.
1850 *
1851 * LOCKING:
1852 * spin_lock_irqsave(host_set lock)
1853 */
1854
1855 static inline void ata_tf_to_host(struct ata_port *ap,
1856 const struct ata_taskfile *tf)
1857 {
1858 ap->ops->tf_load(ap, tf);
1859 ap->ops->exec_command(ap, tf);
1860 }
1861
1862 /**
1863 * ata_busy_sleep - sleep until BSY clears, or timeout
1864 * @ap: port containing status register to be polled
1865 * @tmout_pat: impatience timeout
1866 * @tmout: overall timeout
1867 *
1868 * Sleep until ATA Status register bit BSY clears,
1869 * or a timeout occurs.
1870 *
1871 * LOCKING: None.
1872 */
1873
1874 unsigned int ata_busy_sleep (struct ata_port *ap,
1875 unsigned long tmout_pat, unsigned long tmout)
1876 {
1877 unsigned long timer_start, timeout;
1878 u8 status;
1879
1880 status = ata_busy_wait(ap, ATA_BUSY, 300);
1881 timer_start = jiffies;
1882 timeout = timer_start + tmout_pat;
1883 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1884 msleep(50);
1885 status = ata_busy_wait(ap, ATA_BUSY, 3);
1886 }
1887
1888 if (status & ATA_BUSY)
1889 printk(KERN_WARNING "ata%u is slow to respond, "
1890 "please be patient\n", ap->id);
1891
1892 timeout = timer_start + tmout;
1893 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1894 msleep(50);
1895 status = ata_chk_status(ap);
1896 }
1897
1898 if (status & ATA_BUSY) {
1899 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1900 ap->id, tmout / HZ);
1901 return 1;
1902 }
1903
1904 return 0;
1905 }
1906
1907 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1908 {
1909 struct ata_ioports *ioaddr = &ap->ioaddr;
1910 unsigned int dev0 = devmask & (1 << 0);
1911 unsigned int dev1 = devmask & (1 << 1);
1912 unsigned long timeout;
1913
1914 /* if device 0 was found in ata_devchk, wait for its
1915 * BSY bit to clear
1916 */
1917 if (dev0)
1918 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1919
1920 /* if device 1 was found in ata_devchk, wait for
1921 * register access, then wait for BSY to clear
1922 */
1923 timeout = jiffies + ATA_TMOUT_BOOT;
1924 while (dev1) {
1925 u8 nsect, lbal;
1926
1927 ap->ops->dev_select(ap, 1);
1928 if (ap->flags & ATA_FLAG_MMIO) {
1929 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1930 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1931 } else {
1932 nsect = inb(ioaddr->nsect_addr);
1933 lbal = inb(ioaddr->lbal_addr);
1934 }
1935 if ((nsect == 1) && (lbal == 1))
1936 break;
1937 if (time_after(jiffies, timeout)) {
1938 dev1 = 0;
1939 break;
1940 }
1941 msleep(50); /* give drive a breather */
1942 }
1943 if (dev1)
1944 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1945
1946 /* is all this really necessary? */
1947 ap->ops->dev_select(ap, 0);
1948 if (dev1)
1949 ap->ops->dev_select(ap, 1);
1950 if (dev0)
1951 ap->ops->dev_select(ap, 0);
1952 }
1953
1954 /**
1955 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1956 * @ap: Port to reset and probe
1957 *
1958 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1959 * probe the bus. Not often used these days.
1960 *
1961 * LOCKING:
1962 * PCI/etc. bus probe sem.
1963 * Obtains host_set lock.
1964 *
1965 */
1966
1967 static unsigned int ata_bus_edd(struct ata_port *ap)
1968 {
1969 struct ata_taskfile tf;
1970 unsigned long flags;
1971
1972 /* set up execute-device-diag (bus reset) taskfile */
1973 /* also, take interrupts to a known state (disabled) */
1974 DPRINTK("execute-device-diag\n");
1975 ata_tf_init(ap, &tf, 0);
1976 tf.ctl |= ATA_NIEN;
1977 tf.command = ATA_CMD_EDD;
1978 tf.protocol = ATA_PROT_NODATA;
1979
1980 /* do bus reset */
1981 spin_lock_irqsave(&ap->host_set->lock, flags);
1982 ata_tf_to_host(ap, &tf);
1983 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1984
1985 /* spec says at least 2ms. but who knows with those
1986 * crazy ATAPI devices...
1987 */
1988 msleep(150);
1989
1990 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1991 }
1992
1993 static unsigned int ata_bus_softreset(struct ata_port *ap,
1994 unsigned int devmask)
1995 {
1996 struct ata_ioports *ioaddr = &ap->ioaddr;
1997
1998 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1999
2000 /* software reset. causes dev0 to be selected */
2001 if (ap->flags & ATA_FLAG_MMIO) {
2002 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2003 udelay(20); /* FIXME: flush */
2004 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2005 udelay(20); /* FIXME: flush */
2006 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2007 } else {
2008 outb(ap->ctl, ioaddr->ctl_addr);
2009 udelay(10);
2010 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2011 udelay(10);
2012 outb(ap->ctl, ioaddr->ctl_addr);
2013 }
2014
2015 /* spec mandates ">= 2ms" before checking status.
2016 * We wait 150ms, because that was the magic delay used for
2017 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2018 * between when the ATA command register is written, and then
2019 * status is checked. Because waiting for "a while" before
2020 * checking status is fine, post SRST, we perform this magic
2021 * delay here as well.
2022 *
2023 * Old drivers/ide uses the 2 ms rule and then waits for ready.
2024 */
2025 msleep(150);
2026
2027
2028 /* Before we perform post reset processing we want to see if
2029 the bus shows 0xFF because the odd clown forgets the D7 pulldown
2030 resistor */
2031
2032 if (ata_check_status(ap) == 0xFF)
2033 return 1; /* Positive is failure for some reason */
2034
2035 ata_bus_post_reset(ap, devmask);
2036
2037 return 0;
2038 }
2039
2040 /**
2041 * ata_bus_reset - reset host port and associated ATA channel
2042 * @ap: port to reset
2043 *
2044 * This is typically the first time we actually start issuing
2045 * commands to the ATA channel. We wait for BSY to clear, then
2046 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2047 * result. Determine what devices, if any, are on the channel
2048 * by looking at the device 0/1 error register. Look at the signature
2049 * stored in each device's taskfile registers, to determine if
2050 * the device is ATA or ATAPI.
2051 *
2052 * LOCKING:
2053 * PCI/etc. bus probe sem.
2054 * Obtains host_set lock.
2055 *
2056 * SIDE EFFECTS:
2057 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2058 */
2059
2060 void ata_bus_reset(struct ata_port *ap)
2061 {
2062 struct ata_ioports *ioaddr = &ap->ioaddr;
2063 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2064 u8 err;
2065 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
2066
2067 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2068
2069 /* determine if device 0/1 are present */
2070 if (ap->flags & ATA_FLAG_SATA_RESET)
2071 dev0 = 1;
2072 else {
2073 dev0 = ata_devchk(ap, 0);
2074 if (slave_possible)
2075 dev1 = ata_devchk(ap, 1);
2076 }
2077
2078 if (dev0)
2079 devmask |= (1 << 0);
2080 if (dev1)
2081 devmask |= (1 << 1);
2082
2083 /* select device 0 again */
2084 ap->ops->dev_select(ap, 0);
2085
2086 /* issue bus reset */
2087 if (ap->flags & ATA_FLAG_SRST)
2088 rc = ata_bus_softreset(ap, devmask);
2089 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
2090 /* set up device control */
2091 if (ap->flags & ATA_FLAG_MMIO)
2092 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2093 else
2094 outb(ap->ctl, ioaddr->ctl_addr);
2095 rc = ata_bus_edd(ap);
2096 }
2097
2098 if (rc)
2099 goto err_out;
2100
2101 /*
2102 * determine by signature whether we have ATA or ATAPI devices
2103 */
2104 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2105 if ((slave_possible) && (err != 0x81))
2106 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2107
2108 /* re-enable interrupts */
2109 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2110 ata_irq_on(ap);
2111
2112 /* is double-select really necessary? */
2113 if (ap->device[1].class != ATA_DEV_NONE)
2114 ap->ops->dev_select(ap, 1);
2115 if (ap->device[0].class != ATA_DEV_NONE)
2116 ap->ops->dev_select(ap, 0);
2117
2118 /* if no devices were detected, disable this port */
2119 if ((ap->device[0].class == ATA_DEV_NONE) &&
2120 (ap->device[1].class == ATA_DEV_NONE))
2121 goto err_out;
2122
2123 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2124 /* set up device control for ATA_FLAG_SATA_RESET */
2125 if (ap->flags & ATA_FLAG_MMIO)
2126 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2127 else
2128 outb(ap->ctl, ioaddr->ctl_addr);
2129 }
2130
2131 DPRINTK("EXIT\n");
2132 return;
2133
2134 err_out:
2135 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2136 ap->ops->port_disable(ap);
2137
2138 DPRINTK("EXIT\n");
2139 }
2140
2141 static int sata_phy_resume(struct ata_port *ap)
2142 {
2143 unsigned long timeout = jiffies + (HZ * 5);
2144 u32 sstatus;
2145
2146 scr_write_flush(ap, SCR_CONTROL, 0x300);
2147
2148 /* Wait for phy to become ready, if necessary. */
2149 do {
2150 msleep(200);
2151 sstatus = scr_read(ap, SCR_STATUS);
2152 if ((sstatus & 0xf) != 1)
2153 return 0;
2154 } while (time_before(jiffies, timeout));
2155
2156 return -1;
2157 }
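/* Editor's note -- sata_phy_resume() above polls the DET field (bits 3:0)
 * of SStatus and returns 0 as soon as it leaves the "detected, no comm"
 * state. A hedged sketch of the standard DET encoding it relies on
 * (values per the SATA spec; illustration only, not part of the driver):
 */
#if 0
	u32 det = scr_read(ap, SCR_STATUS) & 0xf;
	switch (det) {
	case 0x0:	/* no device detected, phy comm not established */
	case 0x1:	/* device detected, phy comm not yet established */
	case 0x3:	/* device detected and phy comm established */
	case 0x4:	/* phy in offline mode (disabled or BIST) */
		break;
	}
#endif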
2158
2159 /**
2160 * ata_std_probeinit - initialize probing
2161 * @ap: port to be probed
2162 *
2163 * @ap is about to be probed. Initialize it. This function is
2164 * to be used as standard callback for ata_drive_probe_reset().
2165 *
2166 * NOTE!!! Do not use this function as probeinit if a low level
2167 * driver implements only hardreset. Just pass NULL as probeinit
2168 * in that case. Using this function is probably okay but doing
2169 * so makes the reset sequence different from the original
2170 * ->phy_reset implementation, and makes Jeff nervous. :-P
2171 */
2172 void ata_std_probeinit(struct ata_port *ap)
2173 {
2174 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2175 sata_phy_resume(ap);
2176 if (sata_dev_present(ap))
2177 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2178 }
2179 }
2180
2181 /**
2182 * ata_std_softreset - reset host port via ATA SRST
2183 * @ap: port to reset
2184 * @verbose: fail verbosely
2185 * @classes: resulting classes of attached devices
2186 *
2187 * Reset host port using ATA SRST. This function is to be used
2188 * as standard callback for ata_drive_*_reset() functions.
2189 *
2190 * LOCKING:
2191 * Kernel thread context (may sleep)
2192 *
2193 * RETURNS:
2194 * 0 on success, -errno otherwise.
2195 */
2196 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2197 {
2198 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2199 unsigned int devmask = 0, err_mask;
2200 u8 err;
2201
2202 DPRINTK("ENTER\n");
2203
2204 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2205 classes[0] = ATA_DEV_NONE;
2206 goto out;
2207 }
2208
2209 /* determine if device 0/1 are present */
2210 if (ata_devchk(ap, 0))
2211 devmask |= (1 << 0);
2212 if (slave_possible && ata_devchk(ap, 1))
2213 devmask |= (1 << 1);
2214
2215 /* select device 0 again */
2216 ap->ops->dev_select(ap, 0);
2217
2218 /* issue bus reset */
2219 DPRINTK("about to softreset, devmask=%x\n", devmask);
2220 err_mask = ata_bus_softreset(ap, devmask);
2221 if (err_mask) {
2222 if (verbose)
2223 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2224 ap->id, err_mask);
2225 else
2226 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2227 err_mask);
2228 return -EIO;
2229 }
2230
2231 /* determine by signature whether we have ATA or ATAPI devices */
2232 classes[0] = ata_dev_try_classify(ap, 0, &err);
2233 if (slave_possible && err != 0x81)
2234 classes[1] = ata_dev_try_classify(ap, 1, &err);
2235
2236 out:
2237 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2238 return 0;
2239 }
2240
2241 /**
2242 * sata_std_hardreset - reset host port via SATA phy reset
2243 * @ap: port to reset
2244 * @verbose: fail verbosely
2245 * @class: resulting class of attached device
2246 *
2247 * SATA phy-reset host port using DET bits of SControl register.
2248 * This function is to be used as standard callback for
2249 * ata_drive_*_reset().
2250 *
2251 * LOCKING:
2252 * Kernel thread context (may sleep)
2253 *
2254 * RETURNS:
2255 * 0 on success, -errno otherwise.
2256 */
2257 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2258 {
2259 DPRINTK("ENTER\n");
2260
2261 /* Issue phy wake/reset */
2262 scr_write_flush(ap, SCR_CONTROL, 0x301);
2263
2264 /*
2265 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2266 * 10.4.2 says at least 1 ms.
2267 */
2268 msleep(1);
2269
2270 /* Bring phy back */
2271 sata_phy_resume(ap);
2272
2273 /* TODO: phy layer with polling, timeouts, etc. */
2274 if (!sata_dev_present(ap)) {
2275 *class = ATA_DEV_NONE;
2276 DPRINTK("EXIT, link offline\n");
2277 return 0;
2278 }
2279
2280 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2281 if (verbose)
2282 printk(KERN_ERR "ata%u: COMRESET failed "
2283 "(device not ready)\n", ap->id);
2284 else
2285 DPRINTK("EXIT, device not ready\n");
2286 return -EIO;
2287 }
2288
2289 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2290
2291 *class = ata_dev_try_classify(ap, 0, NULL);
2292
2293 DPRINTK("EXIT, class=%u\n", *class);
2294 return 0;
2295 }
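/* Editor's note on the SControl values used above (a hedged reading of
 * the SControl layout: DET in bits 3:0, SPD in 7:4, IPM in 11:8):
 * writing 0x301 sets DET=1 (issue COMRESET) with IPM=3 (disallow the
 * partial/slumber power states); sata_phy_resume() then writes 0x300,
 * clearing DET to release the reset while keeping IPM=3.
 */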
2296
2297 /**
2298 * ata_std_postreset - standard postreset callback
2299 * @ap: the target ata_port
2300 * @classes: classes of attached devices
2301 *
2302 * This function is invoked after a successful reset. Note that
2303 * the device might have been reset more than once using
2304 * different reset methods before postreset is invoked.
2305 *
2306 * This function is to be used as standard callback for
2307 * ata_drive_*_reset().
2308 *
2309 * LOCKING:
2310 * Kernel thread context (may sleep)
2311 */
2312 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2313 {
2314 DPRINTK("ENTER\n");
2315
2316 /* set cable type if it isn't already set */
2317 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2318 ap->cbl = ATA_CBL_SATA;
2319
2320 /* print link status */
2321 if (ap->cbl == ATA_CBL_SATA)
2322 sata_print_link_status(ap);
2323
2324 /* re-enable interrupts */
2325 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2326 ata_irq_on(ap);
2327
2328 /* is double-select really necessary? */
2329 if (classes[1] != ATA_DEV_NONE)
2330 ap->ops->dev_select(ap, 1);
2331 if (classes[0] != ATA_DEV_NONE)
2332 ap->ops->dev_select(ap, 0);
2333
2334 /* bail out if no device is present */
2335 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2336 DPRINTK("EXIT, no device\n");
2337 return;
2338 }
2339
2340 /* set up device control */
2341 if (ap->ioaddr.ctl_addr) {
2342 if (ap->flags & ATA_FLAG_MMIO)
2343 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2344 else
2345 outb(ap->ctl, ap->ioaddr.ctl_addr);
2346 }
2347
2348 DPRINTK("EXIT\n");
2349 }
2350
2351 /**
2352 * ata_std_probe_reset - standard probe reset method
2353 * @ap: port to perform probe-reset on
2354 * @classes: resulting classes of attached devices
2355 *
2356 * The stock off-the-shelf ->probe_reset method.
2357 *
2358 * LOCKING:
2359 * Kernel thread context (may sleep)
2360 *
2361 * RETURNS:
2362 * 0 on success, -errno otherwise.
2363 */
2364 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2365 {
2366 ata_reset_fn_t hardreset;
2367
2368 hardreset = NULL;
2369 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2370 hardreset = sata_std_hardreset;
2371
2372 return ata_drive_probe_reset(ap, ata_std_probeinit,
2373 ata_std_softreset, hardreset,
2374 ata_std_postreset, classes);
2375 }
2376
2377 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2378 ata_postreset_fn_t postreset,
2379 unsigned int *classes)
2380 {
2381 int i, rc;
2382
2383 for (i = 0; i < ATA_MAX_DEVICES; i++)
2384 classes[i] = ATA_DEV_UNKNOWN;
2385
2386 rc = reset(ap, 0, classes);
2387 if (rc)
2388 return rc;
2389
2390 /* If any class isn't ATA_DEV_UNKNOWN, classification is
2391 * complete; convert all remaining ATA_DEV_UNKNOWN entries to
2392 * ATA_DEV_NONE.
2393 */
2394 for (i = 0; i < ATA_MAX_DEVICES; i++)
2395 if (classes[i] != ATA_DEV_UNKNOWN)
2396 break;
2397
2398 if (i < ATA_MAX_DEVICES)
2399 for (i = 0; i < ATA_MAX_DEVICES; i++)
2400 if (classes[i] == ATA_DEV_UNKNOWN)
2401 classes[i] = ATA_DEV_NONE;
2402
2403 if (postreset)
2404 postreset(ap, classes);
2405
2406 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2407 }
2408
2409 /**
2410 * ata_drive_probe_reset - Perform probe reset with given methods
2411 * @ap: port to reset
2412 * @probeinit: probeinit method (can be NULL)
2413 * @softreset: softreset method (can be NULL)
2414 * @hardreset: hardreset method (can be NULL)
2415 * @postreset: postreset method (can be NULL)
2416 * @classes: resulting classes of attached devices
2417 *
2418 * Reset the specified port and classify attached devices using
2419 * given methods. This function prefers softreset but tries all
2420 * possible reset sequences to reset and classify devices. This
2421 * function is intended to be used by low-level drivers for
2422 * constructing their ->probe_reset callback.
2423 *
2424 * Reset methods should follow the following rules.
2425 *
2426 * - Return 0 on success, -errno on failure.
2427 * - If classification is supported, fill classes[] with
2428 * recognized class codes.
2429 * - If classification is not supported, leave classes[] alone.
2430 * - If verbose is non-zero, print error message on failure;
2431 * otherwise, shut up.
2432 *
2433 * LOCKING:
2434 * Kernel thread context (may sleep)
2435 *
2436 * RETURNS:
2437 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2438 * if classification fails, and any error code from reset
2439 * methods.
2440 */
2441 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2442 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2443 ata_postreset_fn_t postreset, unsigned int *classes)
2444 {
2445 int rc = -EINVAL;
2446
2447 if (probeinit)
2448 probeinit(ap);
2449
2450 if (softreset) {
2451 rc = do_probe_reset(ap, softreset, postreset, classes);
2452 if (rc == 0)
2453 return 0;
2454 }
2455
2456 if (!hardreset)
2457 return rc;
2458
2459 rc = do_probe_reset(ap, hardreset, postreset, classes);
2460 if (rc != -ENODEV)
2461 return rc;
2462
2463 if (softreset)
2464 rc = do_probe_reset(ap, softreset, postreset, classes);
2465
2466 return rc;
2467 }
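/* Editor's illustration -- how a low-level driver might compose its own
 * ->probe_reset from the helpers above. "mydrv_probe_reset" is a
 * hypothetical name; the callbacks are the standard ones from this file
 * (sketch only, not part of the driver):
 */
#if 0
static int mydrv_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	/* prefer SRST; fall back to COMRESET on SATA-capable ports */
	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, sata_std_hardreset,
				     ata_std_postreset, classes);
}
#endif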
2468
2469 /**
2470 * ata_dev_same_device - Determine whether new ID matches configured device
2471 * @ap: port on which the device to compare against resides
2472 * @dev: device to compare against
2473 * @new_class: class of the new device
2474 * @new_id: IDENTIFY page of the new device
2475 *
2476 * Compare @new_class and @new_id against @dev and determine
2477 * whether @dev is the device indicated by @new_class and
2478 * @new_id.
2479 *
2480 * LOCKING:
2481 * None.
2482 *
2483 * RETURNS:
2484 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2485 */
2486 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2487 unsigned int new_class, const u16 *new_id)
2488 {
2489 const u16 *old_id = dev->id;
2490 unsigned char model[2][41], serial[2][21];
2491 u64 new_n_sectors;
2492
2493 if (dev->class != new_class) {
2494 printk(KERN_INFO
2495 "ata%u: dev %u class mismatch %d != %d\n",
2496 ap->id, dev->devno, dev->class, new_class);
2497 return 0;
2498 }
2499
2500 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2501 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2502 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2503 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2504 new_n_sectors = ata_id_n_sectors(new_id);
2505
2506 if (strcmp(model[0], model[1])) {
2507 printk(KERN_INFO
2508 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2509 ap->id, dev->devno, model[0], model[1]);
2510 return 0;
2511 }
2512
2513 if (strcmp(serial[0], serial[1])) {
2514 printk(KERN_INFO
2515 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2516 ap->id, dev->devno, serial[0], serial[1]);
2517 return 0;
2518 }
2519
2520 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2521 printk(KERN_INFO
2522 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2523 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2524 (unsigned long long)new_n_sectors);
2525 return 0;
2526 }
2527
2528 return 1;
2529 }
2530
2531 /**
2532 * ata_dev_revalidate - Revalidate ATA device
2533 * @ap: port on which the device to revalidate resides
2534 * @dev: device to revalidate
2535 * @post_reset: is this revalidation after reset?
2536 *
2537 * Re-read IDENTIFY page and make sure @dev is still attached to
2538 * the port.
2539 *
2540 * LOCKING:
2541 * Kernel thread context (may sleep)
2542 *
2543 * RETURNS:
2544 * 0 on success, negative errno otherwise
2545 */
2546 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2547 int post_reset)
2548 {
2549 unsigned int class;
2550 u16 *id;
2551 int rc;
2552
2553 if (!ata_dev_present(dev))
2554 return -ENODEV;
2555
2556 class = dev->class;
2557 id = NULL;
2558
2559 /* allocate & read ID data */
2560 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2561 if (rc)
2562 goto fail;
2563
2564 /* is the device still there? */
2565 if (!ata_dev_same_device(ap, dev, class, id)) {
2566 rc = -ENODEV;
2567 goto fail;
2568 }
2569
2570 kfree(dev->id);
2571 dev->id = id;
2572
2573 /* configure device according to the new ID */
2574 return ata_dev_configure(ap, dev, 0);
2575
2576 fail:
2577 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2578 ap->id, dev->devno, rc);
2579 kfree(id);
2580 return rc;
2581 }
2582
2583 static const char * const ata_dma_blacklist [] = {
2584 "WDC AC11000H", NULL,
2585 "WDC AC22100H", NULL,
2586 "WDC AC32500H", NULL,
2587 "WDC AC33100H", NULL,
2588 "WDC AC31600H", NULL,
2589 "WDC AC32100H", "24.09P07",
2590 "WDC AC23200L", "21.10N21",
2591 "Compaq CRD-8241B", NULL,
2592 "CRD-8400B", NULL,
2593 "CRD-8480B", NULL,
2594 "CRD-8482B", NULL,
2595 "CRD-84", NULL,
2596 "SanDisk SDP3B", NULL,
2597 "SanDisk SDP3B-64", NULL,
2598 "SANYO CD-ROM CRD", NULL,
2599 "HITACHI CDR-8", NULL,
2600 "HITACHI CDR-8335", NULL,
2601 "HITACHI CDR-8435", NULL,
2602 "Toshiba CD-ROM XM-6202B", NULL,
2603 "TOSHIBA CD-ROM XM-1702BC", NULL,
2604 "CD-532E-A", NULL,
2605 "E-IDE CD-ROM CR-840", NULL,
2606 "CD-ROM Drive/F5A", NULL,
2607 "WPI CDD-820", NULL,
2608 "SAMSUNG CD-ROM SC-148C", NULL,
2609 "SAMSUNG CD-ROM SC", NULL,
2610 "SanDisk SDP3B-64", NULL,
2611 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2612 "_NEC DV5800A", NULL,
2613 "SAMSUNG CD-ROM SN-124", "N001"
2614 };
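/* Editor's note -- the table above is a flat list of (model, firmware
 * revision) pairs, which is why ata_dma_blacklisted() below walks it in
 * steps of two. Hypothetical entries, sketch only:
 *
 *	"FOO DRIVE",	NULL,		blacklist every firmware revision
 *	"BAR DRIVE",	"1.23",		blacklist only firmware rev 1.23
 */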
2615
2616 static int ata_strim(char *s, size_t len)
2617 {
2618 len = strnlen(s, len);
2619
2620 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2621 while ((len > 0) && (s[len - 1] == ' ')) {
2622 len--;
2623 s[len] = 0;
2624 }
2625 return len;
2626 }
2627
2628 static int ata_dma_blacklisted(const struct ata_device *dev)
2629 {
2630 unsigned char model_num[40];
2631 unsigned char model_rev[16];
2632 unsigned int nlen, rlen;
2633 int i;
2634
2635 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2636 sizeof(model_num));
2637 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2638 sizeof(model_rev));
2639 nlen = ata_strim(model_num, sizeof(model_num));
2640 rlen = ata_strim(model_rev, sizeof(model_rev));
2641
2642 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2643 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2644 if (ata_dma_blacklist[i+1] == NULL)
2645 return 1;
2646 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2647 return 1;
2648 }
2649 }
2650 return 0;
2651 }
2652
2653 /**
2654 * ata_dev_xfermask - Compute supported xfermask of the given device
2655 * @ap: Port on which the device to compute xfermask for resides
2656 * @dev: Device to compute xfermask for
2657 *
2658 * Compute supported xfermask of @dev and store it in
2659 * dev->*_mask. This function is responsible for applying all
2660 * known limits including host controller limits, device
2661 * blacklist, etc...
2662 *
2663 * LOCKING:
2664 * None.
2665 */
2666 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2667 {
2668 unsigned long xfer_mask;
2669 int i;
2670
2671 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2672 ap->udma_mask);
2673
2674 /* use port-wide xfermask for now */
2675 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2676 struct ata_device *d = &ap->device[i];
2677 if (!ata_dev_present(d))
2678 continue;
2679 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2680 d->udma_mask);
2681 xfer_mask &= ata_id_xfermask(d->id);
2682 if (ata_dma_blacklisted(d))
2683 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2684 }
2685
2686 if (ata_dma_blacklisted(dev))
2687 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2688 "disabling DMA\n", ap->id, dev->devno);
2689
2690 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2691 &dev->udma_mask);
2692 }
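/* Editor's illustration of the pack/unpack helpers used above. The exact
 * bit layout of the packed mask is a libata-internal detail; the sketch
 * only relies on pack and unpack being inverses (illustration only):
 */
#if 0
	unsigned int pio, mwdma, udma;
	unsigned long xfer_mask;

	xfer_mask = ata_pack_xfermask(0x1f /* PIO0-4 */,
				      0x07 /* MWDMA0-2 */,
				      0x3f /* UDMA0-5 */);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* now pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
#endif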
2693
2694 /**
2695 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2696 * @ap: Port associated with device @dev
2697 * @dev: Device to which command will be sent
2698 *
2699 * Issue SET FEATURES - XFER MODE command to device @dev
2700 * on port @ap.
2701 *
2702 * LOCKING:
2703 * PCI/etc. bus probe sem.
2704 */
2705
2706 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2707 {
2708 struct ata_taskfile tf;
2709
2710 /* set up set-features taskfile */
2711 DPRINTK("set features - xfer mode\n");
2712
2713 ata_tf_init(ap, &tf, dev->devno);
2714 tf.command = ATA_CMD_SET_FEATURES;
2715 tf.feature = SETFEATURES_XFER;
2716 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2717 tf.protocol = ATA_PROT_NODATA;
2718 tf.nsect = dev->xfer_mode;
2719
2720 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2721 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2722 ap->id);
2723 ata_port_disable(ap);
2724 }
2725
2726 DPRINTK("EXIT\n");
2727 }
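/* Editor's note -- tf.nsect carries the ATA-defined transfer mode code
 * for the SET FEATURES - XFER MODE subcommand. Assuming the XFER_*
 * constants from <linux/ata.h> (UDMA modes encode as 0x40 + n), a
 * hedged sketch of forcing a mode by hand:
 */
#if 0
	dev->xfer_mode = XFER_UDMA_2;		/* 0x40 + 2 = 0x42 */
	ata_dev_set_xfermode(ap, dev);		/* nsect <- 0x42 */
#endif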
2728
2729 /**
2730 * ata_dev_init_params - Issue INIT DEV PARAMS command
2731 * @ap: Port associated with device @dev
2732 * @dev: Device to which command will be sent
2733 *
2734 * LOCKING:
2735 * Kernel thread context (may sleep)
2736 *
2737 * RETURNS:
2738 * 0 on success, AC_ERR_* mask otherwise.
2739 */
2740
2741 static unsigned int ata_dev_init_params(struct ata_port *ap,
2742 struct ata_device *dev)
2743 {
2744 struct ata_taskfile tf;
2745 unsigned int err_mask;
2746 u16 sectors = dev->id[6];
2747 u16 heads = dev->id[3];
2748
2749 /* Number of sectors per track 1-255. Number of heads 1-16 */
2750 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2751 return 0;
2752
2753 /* set up init dev params taskfile */
2754 DPRINTK("init dev params\n");
2755
2756 ata_tf_init(ap, &tf, dev->devno);
2757 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2758 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2759 tf.protocol = ATA_PROT_NODATA;
2760 tf.nsect = sectors;
2761 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2762
2763 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2764
2765 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2766 return err_mask;
2767 }
2768
2769 /**
2770 * ata_sg_clean - Unmap DMA memory associated with command
2771 * @qc: Command containing DMA memory to be released
2772 *
2773 * Unmap all mapped DMA memory associated with this command.
2774 *
2775 * LOCKING:
2776 * spin_lock_irqsave(host_set lock)
2777 */
2778
2779 static void ata_sg_clean(struct ata_queued_cmd *qc)
2780 {
2781 struct ata_port *ap = qc->ap;
2782 struct scatterlist *sg = qc->__sg;
2783 int dir = qc->dma_dir;
2784 void *pad_buf = NULL;
2785
2786 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2787 WARN_ON(sg == NULL);
2788
2789 if (qc->flags & ATA_QCFLAG_SINGLE)
2790 WARN_ON(qc->n_elem > 1);
2791
2792 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2793
2794 /* if we padded the buffer out to a 32-bit boundary, and the data
2795 * xfer direction is from-device, we must copy from the
2796 * pad buffer back into the supplied buffer
2797 */
2798 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2799 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2800
2801 if (qc->flags & ATA_QCFLAG_SG) {
2802 if (qc->n_elem)
2803 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2804 /* restore last sg */
2805 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2806 if (pad_buf) {
2807 struct scatterlist *psg = &qc->pad_sgent;
2808 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2809 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2810 kunmap_atomic(addr, KM_IRQ0);
2811 }
2812 } else {
2813 if (qc->n_elem)
2814 dma_unmap_single(ap->host_set->dev,
2815 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2816 dir);
2817 /* restore sg */
2818 sg->length += qc->pad_len;
2819 if (pad_buf)
2820 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2821 pad_buf, qc->pad_len);
2822 }
2823
2824 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2825 qc->__sg = NULL;
2826 }
2827
2828 /**
2829 * ata_fill_sg - Fill PCI IDE PRD table
2830 * @qc: Metadata associated with taskfile to be transferred
2831 *
2832 * Fill PCI IDE PRD (scatter-gather) table with segments
2833 * associated with the current disk command.
2834 *
2835 * LOCKING:
2836 * spin_lock_irqsave(host_set lock)
2837 *
2838 */
2839 static void ata_fill_sg(struct ata_queued_cmd *qc)
2840 {
2841 struct ata_port *ap = qc->ap;
2842 struct scatterlist *sg;
2843 unsigned int idx;
2844
2845 WARN_ON(qc->__sg == NULL);
2846 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2847
2848 idx = 0;
2849 ata_for_each_sg(sg, qc) {
2850 u32 addr, offset;
2851 u32 sg_len, len;
2852
2853 /* determine if physical DMA addr spans 64K boundary.
2854 * Note h/w doesn't support 64-bit, so we unconditionally
2855 * truncate dma_addr_t to u32.
2856 */
2857 addr = (u32) sg_dma_address(sg);
2858 sg_len = sg_dma_len(sg);
2859
2860 while (sg_len) {
2861 offset = addr & 0xffff;
2862 len = sg_len;
2863 if ((offset + sg_len) > 0x10000)
2864 len = 0x10000 - offset;
2865
2866 ap->prd[idx].addr = cpu_to_le32(addr);
2867 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2868 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2869
2870 idx++;
2871 sg_len -= len;
2872 addr += len;
2873 }
2874 }
2875
2876 if (idx)
2877 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2878 }
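/* Editor's worked example of the 64K-boundary split above (sketch only):
 * a 32-byte segment at bus address 0x0000fff0 has offset 0xfff0, and
 * 0xfff0 + 32 > 0x10000, so it is emitted as two PRD entries:
 *
 *	PRD[n]   = (0x0000fff0, 16)	up to the 64K boundary
 *	PRD[n+1] = (0x00010000, 16)	the remainder
 *
 * Note also that flags_len stores (len & 0xffff), so a full 0x10000-byte
 * entry is encoded as 0 -- the PCI IDE PRD convention for 64KB.
 */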
2879 /**
2880 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2881 * @qc: Metadata associated with taskfile to check
2882 *
2883 * Allow low-level driver to filter ATA PACKET commands, returning
2884 * a status indicating whether or not it is OK to use DMA for the
2885 * supplied PACKET command.
2886 *
2887 * LOCKING:
2888 * spin_lock_irqsave(host_set lock)
2889 *
2890 * RETURNS: 0 when ATAPI DMA can be used
2891 * nonzero otherwise
2892 */
2893 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2894 {
2895 struct ata_port *ap = qc->ap;
2896 int rc = 0; /* Assume ATAPI DMA is OK by default */
2897
2898 if (ap->ops->check_atapi_dma)
2899 rc = ap->ops->check_atapi_dma(qc);
2900
2901 return rc;
2902 }
2903 /**
2904 * ata_qc_prep - Prepare taskfile for submission
2905 * @qc: Metadata associated with taskfile to be prepared
2906 *
2907 * Prepare ATA taskfile for submission.
2908 *
2909 * LOCKING:
2910 * spin_lock_irqsave(host_set lock)
2911 */
2912 void ata_qc_prep(struct ata_queued_cmd *qc)
2913 {
2914 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2915 return;
2916
2917 ata_fill_sg(qc);
2918 }
2919
2920 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2921
2922 /**
2923 * ata_sg_init_one - Associate command with memory buffer
2924 * @qc: Command to be associated
2925 * @buf: Memory buffer
2926 * @buflen: Length of memory buffer, in bytes.
2927 *
2928 * Initialize the data-related elements of queued_cmd @qc
2929 * to point to a single memory buffer, @buf of byte length @buflen.
2930 *
2931 * LOCKING:
2932 * spin_lock_irqsave(host_set lock)
2933 */
2934
2935 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2936 {
2937 struct scatterlist *sg;
2938
2939 qc->flags |= ATA_QCFLAG_SINGLE;
2940
2941 memset(&qc->sgent, 0, sizeof(qc->sgent));
2942 qc->__sg = &qc->sgent;
2943 qc->n_elem = 1;
2944 qc->orig_n_elem = 1;
2945 qc->buf_virt = buf;
2946
2947 sg = qc->__sg;
2948 sg_init_one(sg, buf, buflen);
2949 }
2950
2951 /**
2952 * ata_sg_init - Associate command with scatter-gather table.
2953 * @qc: Command to be associated
2954 * @sg: Scatter-gather table.
2955 * @n_elem: Number of elements in s/g table.
2956 *
2957 * Initialize the data-related elements of queued_cmd @qc
2958 * to point to a scatter-gather table @sg, containing @n_elem
2959 * elements.
2960 *
2961 * LOCKING:
2962 * spin_lock_irqsave(host_set lock)
2963 */
2964
2965 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2966 unsigned int n_elem)
2967 {
2968 qc->flags |= ATA_QCFLAG_SG;
2969 qc->__sg = sg;
2970 qc->n_elem = n_elem;
2971 qc->orig_n_elem = n_elem;
2972 }
2973
2974 /**
2975 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2976 * @qc: Command with memory buffer to be mapped.
2977 *
2978 * DMA-map the memory buffer associated with queued_cmd @qc.
2979 *
2980 * LOCKING:
2981 * spin_lock_irqsave(host_set lock)
2982 *
2983 * RETURNS:
2984 * Zero on success, negative on error.
2985 */
2986
2987 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2988 {
2989 struct ata_port *ap = qc->ap;
2990 int dir = qc->dma_dir;
2991 struct scatterlist *sg = qc->__sg;
2992 dma_addr_t dma_address;
2993 int trim_sg = 0;
2994
2995 /* we must lengthen transfers to end on a 32-bit boundary */
2996 qc->pad_len = sg->length & 3;
2997 if (qc->pad_len) {
2998 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2999 struct scatterlist *psg = &qc->pad_sgent;
3000
3001 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3002
3003 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3004
3005 if (qc->tf.flags & ATA_TFLAG_WRITE)
3006 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3007 qc->pad_len);
3008
3009 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3010 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3011 /* trim sg */
3012 sg->length -= qc->pad_len;
3013 if (sg->length == 0)
3014 trim_sg = 1;
3015
3016 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3017 sg->length, qc->pad_len);
3018 }
3019
3020 if (trim_sg) {
3021 qc->n_elem--;
3022 goto skip_map;
3023 }
3024
3025 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3026 sg->length, dir);
3027 if (dma_mapping_error(dma_address)) {
3028 /* restore sg */
3029 sg->length += qc->pad_len;
3030 return -1;
3031 }
3032
3033 sg_dma_address(sg) = dma_address;
3034 sg_dma_len(sg) = sg->length;
3035
3036 skip_map:
3037 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3038 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3039
3040 return 0;
3041 }
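/* Editor's worked example of the padding above (sketch only): for a
 * 510-byte ATAPI transfer, pad_len = 510 & 3 = 2. The sg entry is
 * trimmed to 508 bytes and the final 2 bytes travel through the 4-byte
 * pad buffer instead -- copied in here for a write, or copied back out
 * in ata_sg_clean() for a read -- so the DMA engine always sees a
 * transfer ending on a 32-bit boundary.
 */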
3042
3043 /**
3044 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3045 * @qc: Command with scatter-gather table to be mapped.
3046 *
3047 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3048 *
3049 * LOCKING:
3050 * spin_lock_irqsave(host_set lock)
3051 *
3052 * RETURNS:
3053 * Zero on success, negative on error.
3054 *
3055 */
3056
3057 static int ata_sg_setup(struct ata_queued_cmd *qc)
3058 {
3059 struct ata_port *ap = qc->ap;
3060 struct scatterlist *sg = qc->__sg;
3061 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3062 int n_elem, pre_n_elem, dir, trim_sg = 0;
3063
3064 VPRINTK("ENTER, ata%u\n", ap->id);
3065 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3066
3067 /* we must lengthen transfers to end on a 32-bit boundary */
3068 qc->pad_len = lsg->length & 3;
3069 if (qc->pad_len) {
3070 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3071 struct scatterlist *psg = &qc->pad_sgent;
3072 unsigned int offset;
3073
3074 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3075
3076 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3077
3078 /*
3079 * psg->page/offset are used to copy to-be-written
3080 * data in this function or read data in ata_sg_clean.
3081 */
3082 offset = lsg->offset + lsg->length - qc->pad_len;
3083 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3084 psg->offset = offset_in_page(offset);
3085
3086 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3087 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3088 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3089 kunmap_atomic(addr, KM_IRQ0);
3090 }
3091
3092 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3093 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3094 /* trim last sg */
3095 lsg->length -= qc->pad_len;
3096 if (lsg->length == 0)
3097 trim_sg = 1;
3098
3099 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3100 qc->n_elem - 1, lsg->length, qc->pad_len);
3101 }
3102
3103 pre_n_elem = qc->n_elem;
3104 if (trim_sg && pre_n_elem)
3105 pre_n_elem--;
3106
3107 if (!pre_n_elem) {
3108 n_elem = 0;
3109 goto skip_map;
3110 }
3111
3112 dir = qc->dma_dir;
3113 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3114 if (n_elem < 1) {
3115 /* restore last sg */
3116 lsg->length += qc->pad_len;
3117 return -1;
3118 }
3119
3120 DPRINTK("%d sg elements mapped\n", n_elem);
3121
3122 skip_map:
3123 qc->n_elem = n_elem;
3124
3125 return 0;
3126 }
3127
3128 /**
3129 * ata_poll_qc_complete - turn irq back on and finish qc
3130 * @qc: Command to complete
3132 *
3133 * LOCKING:
3134 * None. (grabs host lock)
3135 */
3136
3137 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3138 {
3139 struct ata_port *ap = qc->ap;
3140 unsigned long flags;
3141
3142 spin_lock_irqsave(&ap->host_set->lock, flags);
3143 ap->flags &= ~ATA_FLAG_NOINTR;
3144 ata_irq_on(ap);
3145 ata_qc_complete(qc);
3146 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3147 }
3148
3149 /**
3150 * ata_pio_poll - poll using PIO, depending on current state
3151 * @ap: the target ata_port
3152 *
3153 * LOCKING:
3154 * None. (executing in kernel thread context)
3155 *
3156 * RETURNS:
3157 * timeout value to use
3158 */
3159
3160 static unsigned long ata_pio_poll(struct ata_port *ap)
3161 {
3162 struct ata_queued_cmd *qc;
3163 u8 status;
3164 unsigned int poll_state = HSM_ST_UNKNOWN;
3165 unsigned int reg_state = HSM_ST_UNKNOWN;
3166
3167 qc = ata_qc_from_tag(ap, ap->active_tag);
3168 WARN_ON(qc == NULL);
3169
3170 switch (ap->hsm_task_state) {
3171 case HSM_ST:
3172 case HSM_ST_POLL:
3173 poll_state = HSM_ST_POLL;
3174 reg_state = HSM_ST;
3175 break;
3176 case HSM_ST_LAST:
3177 case HSM_ST_LAST_POLL:
3178 poll_state = HSM_ST_LAST_POLL;
3179 reg_state = HSM_ST_LAST;
3180 break;
3181 default:
3182 BUG();
3183 break;
3184 }
3185
3186 status = ata_chk_status(ap);
3187 if (status & ATA_BUSY) {
3188 if (time_after(jiffies, ap->pio_task_timeout)) {
3189 qc->err_mask |= AC_ERR_TIMEOUT;
3190 ap->hsm_task_state = HSM_ST_TMOUT;
3191 return 0;
3192 }
3193 ap->hsm_task_state = poll_state;
3194 return ATA_SHORT_PAUSE;
3195 }
3196
3197 ap->hsm_task_state = reg_state;
3198 return 0;
3199 }
3200
3201 /**
3202 * ata_pio_complete - check if drive is busy or idle
3203 * @ap: the target ata_port
3204 *
3205 * LOCKING:
3206 * None. (executing in kernel thread context)
3207 *
3208 * RETURNS:
3209 * Non-zero if qc completed, zero otherwise.
3210 */
3211
3212 static int ata_pio_complete (struct ata_port *ap)
3213 {
3214 struct ata_queued_cmd *qc;
3215 u8 drv_stat;
3216
3217 /*
3218 * This is purely heuristic. This is a fast path. Sometimes when
3219 * we enter, BSY will be cleared in a chk-status or two. If not,
3220 * the drive is probably seeking or something. Snooze for a couple
3221 * msecs, then chk-status again. If still busy, fall back to
3222 * HSM_ST_POLL state.
3223 */
3224 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3225 if (drv_stat & ATA_BUSY) {
3226 msleep(2);
3227 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3228 if (drv_stat & ATA_BUSY) {
3229 ap->hsm_task_state = HSM_ST_LAST_POLL;
3230 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3231 return 0;
3232 }
3233 }
3234
3235 qc = ata_qc_from_tag(ap, ap->active_tag);
3236 WARN_ON(qc == NULL);
3237
3238 drv_stat = ata_wait_idle(ap);
3239 if (!ata_ok(drv_stat)) {
3240 qc->err_mask |= __ac_err_mask(drv_stat);
3241 ap->hsm_task_state = HSM_ST_ERR;
3242 return 0;
3243 }
3244
3245 ap->hsm_task_state = HSM_ST_IDLE;
3246
3247 WARN_ON(qc->err_mask);
3248 ata_poll_qc_complete(qc);
3249
3250 /* another command may start at this point */
3251
3252 return 1;
3253 }
3254
3255
3256 /**
3257 * swap_buf_le16 - swap halves of 16-bit words in place
3258 * @buf: Buffer to swap
3259 * @buf_words: Number of 16-bit words in buffer.
3260 *
3261 * Swap halves of 16-bit words if needed to convert from
3262 * little-endian byte order to native cpu byte order, or
3263 * vice-versa.
3264 *
3265 * LOCKING:
3266 * Inherited from caller.
3267 */
3268 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3269 {
3270 #ifdef __BIG_ENDIAN
3271 unsigned int i;
3272
3273 for (i = 0; i < buf_words; i++)
3274 buf[i] = le16_to_cpu(buf[i]);
3275 #endif /* __BIG_ENDIAN */
3276 }
3277
3278 /**
3279 * ata_mmio_data_xfer - Transfer data by MMIO
3280 * @ap: port to read/write
3281 * @buf: data buffer
3282 * @buflen: buffer length
3283 * @write_data: read/write
3284 *
3285 * Transfer data from/to the device data register by MMIO.
3286 *
3287 * LOCKING:
3288 * Inherited from caller.
3289 */
3290
3291 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3292 unsigned int buflen, int write_data)
3293 {
3294 unsigned int i;
3295 unsigned int words = buflen >> 1;
3296 u16 *buf16 = (u16 *) buf;
3297 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3298
3299 /* Transfer multiple of 2 bytes */
3300 if (write_data) {
3301 for (i = 0; i < words; i++)
3302 writew(le16_to_cpu(buf16[i]), mmio);
3303 } else {
3304 for (i = 0; i < words; i++)
3305 buf16[i] = cpu_to_le16(readw(mmio));
3306 }
3307
3308 /* Transfer trailing 1 byte, if any. */
3309 if (unlikely(buflen & 0x01)) {
3310 u16 align_buf[1] = { 0 };
3311 unsigned char *trailing_buf = buf + buflen - 1;
3312
3313 if (write_data) {
3314 memcpy(align_buf, trailing_buf, 1);
3315 writew(le16_to_cpu(align_buf[0]), mmio);
3316 } else {
3317 align_buf[0] = cpu_to_le16(readw(mmio));
3318 memcpy(trailing_buf, align_buf, 1);
3319 }
3320 }
3321 }
3322
3323 /**
3324 * ata_pio_data_xfer - Transfer data by PIO
3325 * @ap: port to read/write
3326 * @buf: data buffer
3327 * @buflen: buffer length
3328 * @write_data: read/write
3329 *
3330 * Transfer data from/to the device data register by PIO.
3331 *
3332 * LOCKING:
3333 * Inherited from caller.
3334 */
3335
3336 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3337 unsigned int buflen, int write_data)
3338 {
3339 unsigned int words = buflen >> 1;
3340
3341 /* Transfer multiple of 2 bytes */
3342 if (write_data)
3343 outsw(ap->ioaddr.data_addr, buf, words);
3344 else
3345 insw(ap->ioaddr.data_addr, buf, words);
3346
3347 /* Transfer trailing 1 byte, if any. */
3348 if (unlikely(buflen & 0x01)) {
3349 u16 align_buf[1] = { 0 };
3350 unsigned char *trailing_buf = buf + buflen - 1;
3351
3352 if (write_data) {
3353 memcpy(align_buf, trailing_buf, 1);
3354 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3355 } else {
3356 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3357 memcpy(trailing_buf, align_buf, 1);
3358 }
3359 }
3360 }
3361
3362 /**
3363 * ata_data_xfer - Transfer data from/to the data register.
3364 * @ap: port to read/write
3365 * @buf: data buffer
3366 * @buflen: buffer length
3367 * @do_write: read/write
3368 *
3369 * Transfer data from/to the device data register.
3370 *
3371 * LOCKING:
3372 * Inherited from caller.
3373 */
3374
3375 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3376 unsigned int buflen, int do_write)
3377 {
3378 /* Make the crap hardware pay the costs not the good stuff */
3379 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3380 unsigned long flags;
3381 local_irq_save(flags);
3382 if (ap->flags & ATA_FLAG_MMIO)
3383 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3384 else
3385 ata_pio_data_xfer(ap, buf, buflen, do_write);
3386 local_irq_restore(flags);
3387 } else {
3388 if (ap->flags & ATA_FLAG_MMIO)
3389 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3390 else
3391 ata_pio_data_xfer(ap, buf, buflen, do_write);
3392 }
3393 }
3394
3395 /**
3396 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3397 * @qc: Command on going
3398 *
3399 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3400 *
3401 * LOCKING:
3402 * Inherited from caller.
3403 */
3404
3405 static void ata_pio_sector(struct ata_queued_cmd *qc)
3406 {
3407 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3408 struct scatterlist *sg = qc->__sg;
3409 struct ata_port *ap = qc->ap;
3410 struct page *page;
3411 unsigned int offset;
3412 unsigned char *buf;
3413
3414 if (qc->cursect == (qc->nsect - 1))
3415 ap->hsm_task_state = HSM_ST_LAST;
3416
3417 page = sg[qc->cursg].page;
3418 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3419
3420 /* get the current page and offset */
3421 page = nth_page(page, (offset >> PAGE_SHIFT));
3422 offset %= PAGE_SIZE;
3423
3424 buf = kmap(page) + offset;
3425
3426 qc->cursect++;
3427 qc->cursg_ofs++;
3428
3429 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3430 qc->cursg++;
3431 qc->cursg_ofs = 0;
3432 }
3433
3434 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3435
3436 /* do the actual data transfer */
3437 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3438 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3439
3440 kunmap(page);
3441 }
3442
3443 /**
3444 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3445 * @qc: Command on going
3446 * @bytes: number of bytes
3447 *
3448 * Transfer data from/to the ATAPI device.
3449 *
3450 * LOCKING:
3451 * Inherited from caller.
3452 *
3453 */
3454
3455 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3456 {
3457 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3458 struct scatterlist *sg = qc->__sg;
3459 struct ata_port *ap = qc->ap;
3460 struct page *page;
3461 unsigned char *buf;
3462 unsigned int offset, count;
3463
3464 if (qc->curbytes + bytes >= qc->nbytes)
3465 ap->hsm_task_state = HSM_ST_LAST;
3466
3467 next_sg:
3468 if (unlikely(qc->cursg >= qc->n_elem)) {
3469 /*
3470 * The end of qc->sg is reached and the device expects
3471 * more data to transfer. In order not to overrun qc->sg
3472 * and fulfill length specified in the byte count register,
3473 * - for the read case, discard trailing data from the device
3474 * - for the write case, pad with zero data to the device
3475 */
3476 u16 pad_buf[1] = { 0 };
3477 unsigned int words = bytes >> 1;
3478 unsigned int i;
3479
3480 if (words) /* warning if bytes > 1 */
3481 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3482 ap->id, bytes);
3483
3484 for (i = 0; i < words; i++)
3485 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3486
3487 ap->hsm_task_state = HSM_ST_LAST;
3488 return;
3489 }
3490
3491 sg = &qc->__sg[qc->cursg];
3492
3493 page = sg->page;
3494 offset = sg->offset + qc->cursg_ofs;
3495
3496 /* get the current page and offset */
3497 page = nth_page(page, (offset >> PAGE_SHIFT));
3498 offset %= PAGE_SIZE;
3499
3500 /* don't overrun current sg */
3501 count = min(sg->length - qc->cursg_ofs, bytes);
3502
3503 /* don't cross page boundaries */
3504 count = min(count, (unsigned int)PAGE_SIZE - offset);
3505
3506 buf = kmap(page) + offset;
3507
3508 bytes -= count;
3509 qc->curbytes += count;
3510 qc->cursg_ofs += count;
3511
3512 if (qc->cursg_ofs == sg->length) {
3513 qc->cursg++;
3514 qc->cursg_ofs = 0;
3515 }
3516
3517 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3518
3519 /* do the actual data transfer */
3520 ata_data_xfer(ap, buf, count, do_write);
3521
3522 kunmap(page);
3523
3524 if (bytes)
3525 goto next_sg;
3526 }
3527
3528 /**
3529 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3530 * @qc: Command on going
3531 *
3532 * Transfer data from/to the ATAPI device.
3533 *
3534 * LOCKING:
3535 * Inherited from caller.
3536 */
3537
3538 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3539 {
3540 struct ata_port *ap = qc->ap;
3541 struct ata_device *dev = qc->dev;
3542 unsigned int ireason, bc_lo, bc_hi, bytes;
3543 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3544
3545 ap->ops->tf_read(ap, &qc->tf);
3546 ireason = qc->tf.nsect;
3547 bc_lo = qc->tf.lbam;
3548 bc_hi = qc->tf.lbah;
3549 bytes = (bc_hi << 8) | bc_lo;
3550
3551 /* shall be cleared to zero, indicating xfer of data */
3552 if (ireason & (1 << 0))
3553 goto err_out;
3554
3555 /* make sure transfer direction matches expected */
3556 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3557 if (do_write != i_write)
3558 goto err_out;
3559
3560 __atapi_pio_bytes(qc, bytes);
3561
3562 return;
3563
3564 err_out:
3565 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3566 ap->id, dev->devno);
3567 qc->err_mask |= AC_ERR_HSM;
3568 ap->hsm_task_state = HSM_ST_ERR;
3569 }
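/* Editor's note on the ireason checks above (per the ATAPI protocol;
 * sketch only): after tf_read(), bit 0 of the interrupt reason (CoD)
 * must be 0 for a data-transfer phase (1 would mean a command-packet
 * phase), and bit 1 (IO) gives the direction -- IO=1 for device-to-host,
 * IO=0 for host-to-device -- which is cross-checked against the
 * direction the taskfile expects before any bytes are moved.
 */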
3570
3571 /**
3572 * ata_pio_block - start PIO on a block
3573 * @ap: the target ata_port
3574 *
3575 * LOCKING:
3576 * None. (executing in kernel thread context)
3577 */
3578
3579 static void ata_pio_block(struct ata_port *ap)
3580 {
3581 struct ata_queued_cmd *qc;
3582 u8 status;
3583
3584 /*
3585 * This is purely heuristic. This is a fast path.
3586 * Sometimes when we enter, BSY will be cleared in
3587 * a chk-status or two. If not, the drive is probably seeking
3588 * or something. Snooze for a couple msecs, then
3589 * chk-status again. If still busy, fall back to
3590 * HSM_ST_POLL state.
3591 */
3592 status = ata_busy_wait(ap, ATA_BUSY, 5);
3593 if (status & ATA_BUSY) {
3594 msleep(2);
3595 status = ata_busy_wait(ap, ATA_BUSY, 10);
3596 if (status & ATA_BUSY) {
3597 ap->hsm_task_state = HSM_ST_POLL;
3598 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3599 return;
3600 }
3601 }
3602
3603 qc = ata_qc_from_tag(ap, ap->active_tag);
3604 WARN_ON(qc == NULL);
3605
3606 /* check error */
3607 if (status & (ATA_ERR | ATA_DF)) {
3608 qc->err_mask |= AC_ERR_DEV;
3609 ap->hsm_task_state = HSM_ST_ERR;
3610 return;
3611 }
3612
3613 /* transfer data if any */
3614 if (is_atapi_taskfile(&qc->tf)) {
3615 /* DRQ=0 means no more data to transfer */
3616 if ((status & ATA_DRQ) == 0) {
3617 ap->hsm_task_state = HSM_ST_LAST;
3618 return;
3619 }
3620
3621 atapi_pio_bytes(qc);
3622 } else {
3623 /* handle BSY=0, DRQ=0 as error */
3624 if ((status & ATA_DRQ) == 0) {
3625 qc->err_mask |= AC_ERR_HSM;
3626 ap->hsm_task_state = HSM_ST_ERR;
3627 return;
3628 }
3629
3630 ata_pio_sector(qc);
3631 }
3632 }
3633
3634 static void ata_pio_error(struct ata_port *ap)
3635 {
3636 struct ata_queued_cmd *qc;
3637
3638 qc = ata_qc_from_tag(ap, ap->active_tag);
3639 WARN_ON(qc == NULL);
3640
3641 if (qc->tf.command != ATA_CMD_PACKET)
3642 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3643
3644 /* make sure qc->err_mask is available to
3645 * know what's wrong and recover
3646 */
3647 WARN_ON(qc->err_mask == 0);
3648
3649 ap->hsm_task_state = HSM_ST_IDLE;
3650
3651 ata_poll_qc_complete(qc);
3652 }
3653
3654 static void ata_pio_task(void *_data)
3655 {
3656 struct ata_port *ap = _data;
3657 unsigned long timeout;
3658 int qc_completed;
3659
3660 fsm_start:
3661 timeout = 0;
3662 qc_completed = 0;
3663
3664 switch (ap->hsm_task_state) {
3665 case HSM_ST_IDLE:
3666 return;
3667
3668 case HSM_ST:
3669 ata_pio_block(ap);
3670 break;
3671
3672 case HSM_ST_LAST:
3673 qc_completed = ata_pio_complete(ap);
3674 break;
3675
3676 case HSM_ST_POLL:
3677 case HSM_ST_LAST_POLL:
3678 timeout = ata_pio_poll(ap);
3679 break;
3680
3681 case HSM_ST_TMOUT:
3682 case HSM_ST_ERR:
3683 ata_pio_error(ap);
3684 return;
3685 }
3686
3687 if (timeout)
3688 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3689 else if (!qc_completed)
3690 goto fsm_start;
3691 }
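/* Editor's summary of the polling PIO state machine driven above
 * (reconstructed from the handlers in this file; sketch only):
 *
 *	HSM_ST		transfer one data block (ata_pio_block)
 *	HSM_ST_POLL	drive busy mid-transfer; re-poll until ready
 *	HSM_ST_LAST	data done; wait for BSY to clear, complete qc
 *	HSM_ST_LAST_POLL drive busy at completion; re-poll until ready
 *	HSM_ST_TMOUT	poll timed out; finish qc with err_mask set
 *	HSM_ST_ERR	error detected; finish qc with err_mask set
 *	HSM_ST_IDLE	no PIO in progress; the task exits
 */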
3692
3693 /**
3694 * atapi_packet_task - Write CDB bytes to hardware
3695 * @_data: Port to which ATAPI device is attached.
3696 *
3697 * When device has indicated its readiness to accept
3698 * a CDB, this function is called. Send the CDB.
3699 * If DMA is to be performed, exit immediately.
3700 * Otherwise, we are in polling mode, so poll
3701 * status until the operation succeeds or fails.
3702 *
3703 * LOCKING:
3704 * Kernel thread context (may sleep)
3705 */
3706
3707 static void atapi_packet_task(void *_data)
3708 {
3709 struct ata_port *ap = _data;
3710 struct ata_queued_cmd *qc;
3711 u8 status;
3712
3713 qc = ata_qc_from_tag(ap, ap->active_tag);
3714 WARN_ON(qc == NULL);
3715 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3716
3717 /* sleep-wait for BSY to clear */
3718 DPRINTK("busy wait\n");
3719 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3720 qc->err_mask |= AC_ERR_TIMEOUT;
3721 goto err_out;
3722 }
3723
3724 /* make sure DRQ is set */
3725 status = ata_chk_status(ap);
3726 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3727 qc->err_mask |= AC_ERR_HSM;
3728 goto err_out;
3729 }
3730
3731 /* send SCSI cdb */
3732 DPRINTK("send cdb\n");
3733 WARN_ON(qc->dev->cdb_len < 12);
3734
3735 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3736 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3737 unsigned long flags;
3738
3739 /* Once we're done issuing command and kicking bmdma,
3740 * irq handler takes over. To not lose irq, we need
3741 * to clear NOINTR flag before sending cdb, but
3742 * interrupt handler shouldn't be invoked before we're
3743 * finished. Hence, the following locking.
3744 */
3745 spin_lock_irqsave(&ap->host_set->lock, flags);
3746 ap->flags &= ~ATA_FLAG_NOINTR;
3747 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3748 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3749 ap->ops->bmdma_start(qc); /* initiate bmdma */
3750 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3751 } else {
3752 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3753
3754 /* PIO commands are handled by polling */
3755 ap->hsm_task_state = HSM_ST;
3756 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3757 }
3758
3759 return;
3760
3761 err_out:
3762 ata_poll_qc_complete(qc);
3763 }
3764
3765 /**
3766 * ata_qc_timeout - Handle timeout of queued command
3767 * @qc: Command that timed out
3768 *
3769 * Some part of the kernel (currently, only the SCSI layer)
3770 * has noticed that the active command on port @ap has not
3771 * completed after a specified length of time. Handle this
3772 * condition by disabling DMA (if necessary) and completing
3773 * transactions, with error if necessary.
3774 *
3775 * This also handles the case of the "lost interrupt", where
3776 * for some reason (possibly hardware bug, possibly driver bug)
3777 * an interrupt was not delivered to the driver, even though the
3778 * transaction completed successfully.
3779 *
3780 * LOCKING:
3781 * Inherited from SCSI layer (none, can sleep)
3782 */
3783
3784 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3785 {
3786 struct ata_port *ap = qc->ap;
3787 struct ata_host_set *host_set = ap->host_set;
3788 u8 host_stat = 0, drv_stat;
3789 unsigned long flags;
3790
3791 DPRINTK("ENTER\n");
3792
3793 ap->hsm_task_state = HSM_ST_IDLE;
3794
3795 spin_lock_irqsave(&host_set->lock, flags);
3796
3797 switch (qc->tf.protocol) {
3798
3799 case ATA_PROT_DMA:
3800 case ATA_PROT_ATAPI_DMA:
3801 host_stat = ap->ops->bmdma_status(ap);
3802
3803 /* before we do anything else, clear DMA-Start bit */
3804 ap->ops->bmdma_stop(qc);
3805
3806 /* fall through */
3807
3808 default:
3809 ata_altstatus(ap);
3810 drv_stat = ata_chk_status(ap);
3811
3812 /* ack bmdma irq events */
3813 ap->ops->irq_clear(ap);
3814
3815 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3816 ap->id, qc->tf.command, drv_stat, host_stat);
3817
3818 /* complete taskfile transaction */
3819 qc->err_mask |= ac_err_mask(drv_stat);
3820 break;
3821 }
3822
3823 spin_unlock_irqrestore(&host_set->lock, flags);
3824
3825 ata_eh_qc_complete(qc);
3826
3827 DPRINTK("EXIT\n");
3828 }
3829
3830 /**
3831 * ata_eng_timeout - Handle timeout of queued command
3832 * @ap: Port on which timed-out command is active
3833 *
3834 * Some part of the kernel (currently, only the SCSI layer)
3835 * has noticed that the active command on port @ap has not
3836 * completed after a specified length of time. Handle this
3837 * condition by disabling DMA (if necessary) and completing
3838 * transactions, with error if necessary.
3839 *
3840 * This also handles the case of the "lost interrupt", where
3841 * for some reason (possibly hardware bug, possibly driver bug)
3842 * an interrupt was not delivered to the driver, even though the
3843 * transaction completed successfully.
3844 *
3845 * LOCKING:
3846 * Inherited from SCSI layer (none, can sleep)
3847 */
3848
3849 void ata_eng_timeout(struct ata_port *ap)
3850 {
3851 DPRINTK("ENTER\n");
3852
3853 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3854
3855 DPRINTK("EXIT\n");
3856 }
3857
3858 /**
3859 * ata_qc_new - Request an available ATA command, for queueing
3860 * @ap: Port from which a free command structure is requested
3862 *
3863 * LOCKING:
3864 * None.
3865 */
3866
3867 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3868 {
3869 struct ata_queued_cmd *qc = NULL;
3870 unsigned int i;
3871
3872 for (i = 0; i < ATA_MAX_QUEUE; i++)
3873 if (!test_and_set_bit(i, &ap->qactive)) {
3874 qc = ata_qc_from_tag(ap, i);
3875 break;
3876 }
3877
3878 if (qc)
3879 qc->tag = i;
3880
3881 return qc;
3882 }
3883
3884 /**
3885 * ata_qc_new_init - Request an available ATA command, and initialize it
3886 * @ap: Port associated with device @dev
3887 * @dev: Device for which the command will be initialized
3888 *
3889 * LOCKING:
3890 * None.
3891 */
3892
3893 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3894 struct ata_device *dev)
3895 {
3896 struct ata_queued_cmd *qc;
3897
3898 qc = ata_qc_new(ap);
3899 if (qc) {
3900 qc->scsicmd = NULL;
3901 qc->ap = ap;
3902 qc->dev = dev;
3903
3904 ata_qc_reinit(qc);
3905 }
3906
3907 return qc;
3908 }
3909
3910 /**
3911 * ata_qc_free - free unused ata_queued_cmd
3912 * @qc: Command to complete
3913 *
3914 * Designed to free unused ata_queued_cmd object
3915 * in case something prevents using it.
3916 *
3917 * LOCKING:
3918 * spin_lock_irqsave(host_set lock)
3919 */
3920 void ata_qc_free(struct ata_queued_cmd *qc)
3921 {
3922 struct ata_port *ap = qc->ap;
3923 unsigned int tag;
3924
3925 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3926
3927 qc->flags = 0;
3928 tag = qc->tag;
3929 if (likely(ata_tag_valid(tag))) {
3930 if (tag == ap->active_tag)
3931 ap->active_tag = ATA_TAG_POISON;
3932 qc->tag = ATA_TAG_POISON;
3933 clear_bit(tag, &ap->qactive);
3934 }
3935 }
3936
3937 void __ata_qc_complete(struct ata_queued_cmd *qc)
3938 {
3939 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3940 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3941
3942 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3943 ata_sg_clean(qc);
3944
3945 /* atapi: mark qc as inactive to prevent the interrupt handler
3946 * from completing the command twice later, before the error handler
3947 * is called. (when rc != 0 and atapi request sense is needed)
3948 */
3949 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3950
3951 /* call completion callback */
3952 qc->complete_fn(qc);
3953 }
3954
3955 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3956 {
3957 struct ata_port *ap = qc->ap;
3958
3959 switch (qc->tf.protocol) {
3960 case ATA_PROT_DMA:
3961 case ATA_PROT_ATAPI_DMA:
3962 return 1;
3963
3964 case ATA_PROT_ATAPI:
3965 case ATA_PROT_PIO:
3966 if (ap->flags & ATA_FLAG_PIO_DMA)
3967 return 1;
3968
3969 /* fall through */
3970
3971 default:
3972 return 0;
3973 }
3974
3975 /* never reached */
3976 }
3977
3978 /**
3979 * ata_qc_issue - issue taskfile to device
3980 * @qc: command to issue to device
3981 *
3982 * Prepare an ATA command for submission to the device.
3983 * This includes mapping the data into a DMA-able
3984 * area, filling in the S/G table, and finally
3985 * writing the taskfile to hardware, starting the command.
3986 *
3987 * LOCKING:
3988 * spin_lock_irqsave(host_set lock)
3989 *
3990 * RETURNS:
3991 * Zero on success, AC_ERR_* mask on failure
3992 */
3993
3994 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3995 {
3996 struct ata_port *ap = qc->ap;
3997
3998 if (ata_should_dma_map(qc)) {
3999 if (qc->flags & ATA_QCFLAG_SG) {
4000 if (ata_sg_setup(qc))
4001 goto sg_err;
4002 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4003 if (ata_sg_setup_one(qc))
4004 goto sg_err;
4005 }
4006 } else {
4007 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4008 }
4009
4010 ap->ops->qc_prep(qc);
4011
4012 qc->ap->active_tag = qc->tag;
4013 qc->flags |= ATA_QCFLAG_ACTIVE;
4014
4015 return ap->ops->qc_issue(qc);
4016
4017 sg_err:
4018 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4019 return AC_ERR_SYSTEM;
4020 }
4021
4022
4023 /**
4024 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4025 * @qc: command to issue to device
4026 *
4027 * Using various libata functions and hooks, this function
4028 * starts an ATA command. ATA commands are grouped into
4029 * classes called "protocols", and issuing each type of protocol
4030 * is slightly different.
4031 *
4032 * May be used as the qc_issue() entry in ata_port_operations.
4033 *
4034 * LOCKING:
4035 * spin_lock_irqsave(host_set lock)
4036 *
4037 * RETURNS:
4038 * Zero on success, AC_ERR_* mask on failure
4039 */
4040
4041 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4042 {
4043 struct ata_port *ap = qc->ap;
4044
4045 ata_dev_select(ap, qc->dev->devno, 1, 0);
4046
4047 switch (qc->tf.protocol) {
4048 case ATA_PROT_NODATA:
4049 ata_tf_to_host(ap, &qc->tf);
4050 break;
4051
4052 case ATA_PROT_DMA:
4053 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4054 ap->ops->bmdma_setup(qc); /* set up bmdma */
4055 ap->ops->bmdma_start(qc); /* initiate bmdma */
4056 break;
4057
4058 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4059 ata_qc_set_polling(qc);
4060 ata_tf_to_host(ap, &qc->tf);
4061 ap->hsm_task_state = HSM_ST;
4062 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4063 break;
4064
4065 case ATA_PROT_ATAPI:
4066 ata_qc_set_polling(qc);
4067 ata_tf_to_host(ap, &qc->tf);
4068 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4069 break;
4070
4071 case ATA_PROT_ATAPI_NODATA:
4072 ap->flags |= ATA_FLAG_NOINTR;
4073 ata_tf_to_host(ap, &qc->tf);
4074 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4075 break;
4076
4077 case ATA_PROT_ATAPI_DMA:
4078 ap->flags |= ATA_FLAG_NOINTR;
4079 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4080 ap->ops->bmdma_setup(qc); /* set up bmdma */
4081 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4082 break;
4083
4084 default:
4085 WARN_ON(1);
4086 return AC_ERR_SYSTEM;
4087 }
4088
4089 return 0;
4090 }
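
/*
 * Illustrative sketch (hypothetical driver): a typical BMDMA-style
 * low-level driver plugs the helpers in this file into its
 * ata_port_operations and lets libata run the protocol state machine.
 * Every callback named here is a real libata export; only the
 * structure name "example_port_ops" is made up.
 */
static const struct ata_port_operations example_port_ops = {
	.port_disable	= ata_port_disable,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,
	.probe_reset	= ata_std_probe_reset,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,	/* proto-dependent issue */
	.eng_timeout	= ata_eng_timeout,
	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
	.host_stop	= ata_host_stop,
};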
4091
4092 /**
4093 * ata_host_intr - Handle host interrupt for given (port, task)
4094 * @ap: Port on which interrupt arrived (possibly...)
4095 * @qc: Taskfile currently active in engine
4096 *
4097 * Handle host interrupt for given queued command. Currently,
4098 * only DMA interrupts are handled. All other commands are
4099 * handled via polling with interrupts disabled (nIEN bit).
4100 *
4101 * LOCKING:
4102 * spin_lock_irqsave(host_set lock)
4103 *
4104 * RETURNS:
4105 * One if interrupt was handled, zero if not (shared irq).
4106 */
4107
4108 inline unsigned int ata_host_intr (struct ata_port *ap,
4109 struct ata_queued_cmd *qc)
4110 {
4111 u8 status, host_stat;
4112
4113 switch (qc->tf.protocol) {
4114
4115 case ATA_PROT_DMA:
4116 case ATA_PROT_ATAPI_DMA:
4117 case ATA_PROT_ATAPI:
4118 /* check status of DMA engine */
4119 host_stat = ap->ops->bmdma_status(ap);
4120 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4121
4122 /* if it's not our irq... */
4123 if (!(host_stat & ATA_DMA_INTR))
4124 goto idle_irq;
4125
4126 /* before we do anything else, clear DMA-Start bit */
4127 ap->ops->bmdma_stop(qc);
4128
4129 /* fall through */
4130
4131 case ATA_PROT_ATAPI_NODATA:
4132 case ATA_PROT_NODATA:
4133 /* check altstatus */
4134 status = ata_altstatus(ap);
4135 if (status & ATA_BUSY)
4136 goto idle_irq;
4137
4138 /* check main status, clearing INTRQ */
4139 status = ata_chk_status(ap);
4140 if (unlikely(status & ATA_BUSY))
4141 goto idle_irq;
4142 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4143 ap->id, qc->tf.protocol, status);
4144
4145 /* ack bmdma irq events */
4146 ap->ops->irq_clear(ap);
4147
4148 /* complete taskfile transaction */
4149 qc->err_mask |= ac_err_mask(status);
4150 ata_qc_complete(qc);
4151 break;
4152
4153 default:
4154 goto idle_irq;
4155 }
4156
4157 return 1; /* irq handled */
4158
4159 idle_irq:
4160 ap->stats.idle_irq++;
4161
4162 #ifdef ATA_IRQ_TRAP
4163 if ((ap->stats.idle_irq % 1000) == 0) {
4164 ata_irq_ack(ap, 0); /* debug trap */
4165 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4166 return 1;
4167 }
4168 #endif
4169 return 0; /* irq not handled */
4170 }
4171
4172 /**
4173 * ata_interrupt - Default ATA host interrupt handler
4174 * @irq: irq line (unused)
4175 * @dev_instance: pointer to our ata_host_set information structure
4176 * @regs: unused
4177 *
4178 * Default interrupt handler for PCI IDE devices. Calls
4179 * ata_host_intr() for each port that is not disabled.
4180 *
4181 * LOCKING:
4182 * Obtains host_set lock during operation.
4183 *
4184 * RETURNS:
4185 * IRQ_NONE or IRQ_HANDLED.
4186 */
4187
4188 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4189 {
4190 struct ata_host_set *host_set = dev_instance;
4191 unsigned int i;
4192 unsigned int handled = 0;
4193 unsigned long flags;
4194
4195 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4196 spin_lock_irqsave(&host_set->lock, flags);
4197
4198 for (i = 0; i < host_set->n_ports; i++) {
4199 struct ata_port *ap;
4200
4201 ap = host_set->ports[i];
4202 if (ap &&
4203 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4204 struct ata_queued_cmd *qc;
4205
4206 qc = ata_qc_from_tag(ap, ap->active_tag);
4207 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4208 (qc->flags & ATA_QCFLAG_ACTIVE))
4209 handled |= ata_host_intr(ap, qc);
4210 }
4211 }
4212
4213 spin_unlock_irqrestore(&host_set->lock, flags);
4214
4215 return IRQ_RETVAL(handled);
4216 }
4217
4218
4219 /*
4220 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4221 * without filling any other registers.
4222 */
4223 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4224 u8 cmd)
4225 {
4226 struct ata_taskfile tf;
4227 int err;
4228
4229 ata_tf_init(ap, &tf, dev->devno);
4230
4231 tf.command = cmd;
4232 tf.flags |= ATA_TFLAG_DEVICE;
4233 tf.protocol = ATA_PROT_NODATA;
4234
4235 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4236 if (err)
4237 printk(KERN_ERR "%s: ata command failed: %d\n",
4238 __FUNCTION__, err);
4239
4240 return err;
4241 }
4242
4243 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4244 {
4245 u8 cmd;
4246
4247 if (!ata_try_flush_cache(dev))
4248 return 0;
4249
4250 if (ata_id_has_flush_ext(dev->id))
4251 cmd = ATA_CMD_FLUSH_EXT;
4252 else
4253 cmd = ATA_CMD_FLUSH;
4254
4255 return ata_do_simple_cmd(ap, dev, cmd);
4256 }
4257
4258 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4259 {
4260 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4261 }
4262
4263 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4264 {
4265 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4266 }
4267
4268 /**
4269 * ata_device_resume - wake up a previously suspended device
4270 * @ap: port the device is connected to
4271 * @dev: the device to resume
4272 *
4273 * Kick the drive back into action by sending it an IDLE IMMEDIATE
4274 * command and making sure its transfer mode matches between drive
4275 * and host.
4276 *
4277 */
4278 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4279 {
4280 if (ap->flags & ATA_FLAG_SUSPENDED) {
4281 ap->flags &= ~ATA_FLAG_SUSPENDED;
4282 ata_set_mode(ap);
4283 }
4284 if (!ata_dev_present(dev))
4285 return 0;
4286 if (dev->class == ATA_DEV_ATA)
4287 ata_start_drive(ap, dev);
4288
4289 return 0;
4290 }
4291
4292 /**
4293 * ata_device_suspend - prepare a device for suspend
4294 * @ap: port the device is connected to
4295 * @dev: the device to suspend
4296 *
4297 * Flush the cache on the drive, if appropriate, then issue a
4298 * standbynow command.
4299 */
4300 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4301 {
4302 if (!ata_dev_present(dev))
4303 return 0;
4304 if (dev->class == ATA_DEV_ATA)
4305 ata_flush_cache(ap, dev);
4306
4307 ata_standby_drive(ap, dev);
4308 ap->flags |= ATA_FLAG_SUSPENDED;
4309 return 0;
4310 }
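
/*
 * Illustrative sketch: the SCSI-layer glue (cf. ata_scsi_device_suspend()
 * in libata-scsi.c) maps a scsi_device back to its ata_device before
 * calling the helper above.  This is a simplified approximation, not
 * the exact implementation.
 */
static int example_scsi_suspend(struct scsi_device *sdev, pm_message_t state)
{
	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
	struct ata_device *dev = &ap->device[sdev->id];

	return ata_device_suspend(ap, dev);
}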
4311
4312 /**
4313 * ata_port_start - Set port up for DMA.
4314 * @ap: Port to initialize
4315 *
4316 * Called just after data structures for each port are
4317 * initialized. Allocates space for PRD table.
4318 *
4319 * May be used as the port_start() entry in ata_port_operations.
4320 *
4321 * LOCKING:
4322 * Inherited from caller.
4323 */
4324
4325 int ata_port_start (struct ata_port *ap)
4326 {
4327 struct device *dev = ap->host_set->dev;
4328 int rc;
4329
4330 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4331 if (!ap->prd)
4332 return -ENOMEM;
4333
4334 rc = ata_pad_alloc(ap, dev);
4335 if (rc) {
4336 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4337 return rc;
4338 }
4339
4340 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4341
4342 return 0;
4343 }
4344
4345
4346 /**
4347 * ata_port_stop - Undo ata_port_start()
4348 * @ap: Port to shut down
4349 *
4350 * Frees the PRD table.
4351 *
4352 * May be used as the port_stop() entry in ata_port_operations.
4353 *
4354 * LOCKING:
4355 * Inherited from caller.
4356 */
4357
4358 void ata_port_stop (struct ata_port *ap)
4359 {
4360 struct device *dev = ap->host_set->dev;
4361
4362 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4363 ata_pad_free(ap, dev);
4364 }
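
/*
 * Illustrative sketch (hypothetical driver): a controller that needs
 * per-port private state can layer its own port_start()/port_stop()
 * on the two helpers above.  struct example_port_priv is made up.
 */
struct example_port_priv {
	u32 saved_ctl;			/* whatever the hardware needs */
};

static int example_port_start(struct ata_port *ap)
{
	struct example_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* allocate PRD table + pad buffer */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);	/* undo ata_port_start() */
		return -ENOMEM;
	}
	ap->private_data = pp;

	return 0;
}

static void example_port_stop(struct ata_port *ap)
{
	kfree(ap->private_data);
	ata_port_stop(ap);		/* free PRD table + pad buffer */
}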
4365
4366 void ata_host_stop (struct ata_host_set *host_set)
4367 {
4368 if (host_set->mmio_base)
4369 iounmap(host_set->mmio_base);
4370 }
4371
4372
4373 /**
4374 * ata_host_remove - Unregister SCSI host structure with upper layers
4375 * @ap: Port to unregister
4376 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4377 *
4378 * LOCKING:
4379 * Inherited from caller.
4380 */
4381
4382 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4383 {
4384 struct Scsi_Host *sh = ap->host;
4385
4386 DPRINTK("ENTER\n");
4387
4388 if (do_unregister)
4389 scsi_remove_host(sh);
4390
4391 ap->ops->port_stop(ap);
4392 }
4393
4394 /**
4395 * ata_host_init - Initialize an ata_port structure
4396 * @ap: Structure to initialize
4397 * @host: associated SCSI mid-layer structure
4398 * @host_set: Collection of hosts to which @ap belongs
4399 * @ent: Probe information provided by low-level driver
4400 * @port_no: Port number associated with this ata_port
4401 *
4402 * Initialize a new ata_port structure, and its associated
4403 * scsi_host.
4404 *
4405 * LOCKING:
4406 * Inherited from caller.
4407 */
4408
4409 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4410 struct ata_host_set *host_set,
4411 const struct ata_probe_ent *ent, unsigned int port_no)
4412 {
4413 unsigned int i;
4414
4415 host->max_id = 16;
4416 host->max_lun = 1;
4417 host->max_channel = 1;
4418 host->unique_id = ata_unique_id++;
4419 host->max_cmd_len = 12;
4420
4421 ap->flags = ATA_FLAG_PORT_DISABLED;
4422 ap->id = host->unique_id;
4423 ap->host = host;
4424 ap->ctl = ATA_DEVCTL_OBS;
4425 ap->host_set = host_set;
4426 ap->port_no = port_no;
4427 ap->hard_port_no =
4428 ent->legacy_mode ? ent->hard_port_no : port_no;
4429 ap->pio_mask = ent->pio_mask;
4430 ap->mwdma_mask = ent->mwdma_mask;
4431 ap->udma_mask = ent->udma_mask;
4432 ap->flags |= ent->host_flags;
4433 ap->ops = ent->port_ops;
4434 ap->cbl = ATA_CBL_NONE;
4435 ap->active_tag = ATA_TAG_POISON;
4436 ap->last_ctl = 0xFF;
4437
4438 INIT_WORK(&ap->port_task, NULL, NULL);
4439 INIT_LIST_HEAD(&ap->eh_done_q);
4440
4441 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4442 struct ata_device *dev = &ap->device[i];
4443 dev->devno = i;
4444 dev->pio_mask = UINT_MAX;
4445 dev->mwdma_mask = UINT_MAX;
4446 dev->udma_mask = UINT_MAX;
4447 }
4448
4449 #ifdef ATA_IRQ_TRAP
4450 ap->stats.unhandled_irq = 1;
4451 ap->stats.idle_irq = 1;
4452 #endif
4453
4454 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4455 }
4456
4457 /**
4458 * ata_host_add - Attach low-level ATA driver to system
4459 * @ent: Information provided by low-level driver
4460 * @host_set: Collection of ports to which we add
4461 * @port_no: Port number associated with this host
4462 *
4463 * Attach low-level ATA driver to system.
4464 *
4465 * LOCKING:
4466 * PCI/etc. bus probe sem.
4467 *
4468 * RETURNS:
4469 * New ata_port on success, NULL on error.
4470 */
4471
4472 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4473 struct ata_host_set *host_set,
4474 unsigned int port_no)
4475 {
4476 struct Scsi_Host *host;
4477 struct ata_port *ap;
4478 int rc;
4479
4480 DPRINTK("ENTER\n");
4481 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4482 if (!host)
4483 return NULL;
4484
4485 host->transportt = &ata_scsi_transport_template;
4486
4487 ap = (struct ata_port *) &host->hostdata[0];
4488
4489 ata_host_init(ap, host, host_set, ent, port_no);
4490
4491 rc = ap->ops->port_start(ap);
4492 if (rc)
4493 goto err_out;
4494
4495 return ap;
4496
4497 err_out:
4498 scsi_host_put(host);
4499 return NULL;
4500 }
4501
4502 /**
4503 * ata_device_add - Register hardware device with ATA and SCSI layers
4504 * @ent: Probe information describing hardware device to be registered
4505 *
4506 * This function processes the information provided in the probe
4507 * information struct @ent, allocates the necessary ATA and SCSI
4508 * host information structures, initializes them, and registers
4509 * everything with requisite kernel subsystems.
4510 *
4511 * This function requests irqs, probes the ATA bus, and probes
4512 * the SCSI bus.
4513 *
4514 * LOCKING:
4515 * PCI/etc. bus probe sem.
4516 *
4517 * RETURNS:
4518 * Number of ports registered. Zero on error (no ports registered).
4519 */
4520
4521 int ata_device_add(const struct ata_probe_ent *ent)
4522 {
4523 unsigned int count = 0, i;
4524 struct device *dev = ent->dev;
4525 struct ata_host_set *host_set;
4526
4527 DPRINTK("ENTER\n");
4528 /* alloc a container for our list of ATA ports (buses) */
4529 host_set = kzalloc(sizeof(struct ata_host_set) +
4530 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4531 if (!host_set)
4532 return 0;
4533 spin_lock_init(&host_set->lock);
4534
4535 host_set->dev = dev;
4536 host_set->n_ports = ent->n_ports;
4537 host_set->irq = ent->irq;
4538 host_set->mmio_base = ent->mmio_base;
4539 host_set->private_data = ent->private_data;
4540 host_set->ops = ent->port_ops;
4541
4542 /* register each port bound to this device */
4543 for (i = 0; i < ent->n_ports; i++) {
4544 struct ata_port *ap;
4545 unsigned long xfer_mode_mask;
4546
4547 ap = ata_host_add(ent, host_set, i);
4548 if (!ap)
4549 goto err_out;
4550
4551 host_set->ports[i] = ap;
4552 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4553 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4554 (ap->pio_mask << ATA_SHIFT_PIO);
4555
4556 /* print per-port info to dmesg */
4557 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4558 "bmdma 0x%lX irq %lu\n",
4559 ap->id,
4560 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4561 ata_mode_string(xfer_mode_mask),
4562 ap->ioaddr.cmd_addr,
4563 ap->ioaddr.ctl_addr,
4564 ap->ioaddr.bmdma_addr,
4565 ent->irq);
4566
4567 ata_chk_status(ap);
4568 host_set->ops->irq_clear(ap);
4569 count++;
4570 }
4571
4572 if (!count)
4573 goto err_free_ret;
4574
4575 /* obtain the irq that is shared between channels */
4576 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4577 DRV_NAME, host_set))
4578 goto err_out;
4579
4580 /* perform each probe synchronously */
4581 DPRINTK("probe begin\n");
4582 for (i = 0; i < count; i++) {
4583 struct ata_port *ap;
4584 int rc;
4585
4586 ap = host_set->ports[i];
4587
4588 DPRINTK("ata%u: bus probe begin\n", ap->id);
4589 rc = ata_bus_probe(ap);
4590 DPRINTK("ata%u: bus probe end\n", ap->id);
4591
4592 if (rc) {
4593 /* FIXME: do something useful here?
4594 * Current libata behavior will
4595 * tear down everything when
4596 * the module is removed
4597 * or the h/w is unplugged.
4598 */
4599 }
4600
4601 rc = scsi_add_host(ap->host, dev);
4602 if (rc) {
4603 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4604 ap->id);
4605 /* FIXME: do something useful here */
4606 /* FIXME: handle unconditional calls to
4607 * scsi_scan_host and ata_host_remove, below,
4608 * at the very least
4609 */
4610 }
4611 }
4612
4613 /* probes are done, now scan each port's disk(s) */
4614 DPRINTK("host probe begin\n");
4615 for (i = 0; i < count; i++) {
4616 struct ata_port *ap = host_set->ports[i];
4617
4618 ata_scsi_scan_host(ap);
4619 }
4620
4621 dev_set_drvdata(dev, host_set);
4622
4623 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4624 return ent->n_ports; /* success */
4625
4626 err_out:
4627 for (i = 0; i < count; i++) {
4628 ata_host_remove(host_set->ports[i], 1);
4629 scsi_host_put(host_set->ports[i]->host);
4630 }
4631 err_free_ret:
4632 kfree(host_set);
4633 VPRINTK("EXIT, returning 0\n");
4634 return 0;
4635 }
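
/*
 * Illustrative sketch (hypothetical glue code): a non-PCI driver fills
 * an ata_probe_ent and hands it to ata_device_add().  The sht and ops
 * arguments stand in for the driver's scsi_host_template and
 * ata_port_operations; the resource values are placeholders.
 */
static int example_attach(struct device *dev, struct scsi_host_template *sht,
			  const struct ata_port_operations *ops,
			  unsigned long cmd_addr, unsigned long ctl_addr,
			  unsigned int irq)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;
	probe_ent->sht = sht;
	probe_ent->port_ops = ops;
	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->host_flags = ATA_FLAG_SRST;
	probe_ent->pio_mask = 0x1f;	/* PIO modes 0-4 */

	probe_ent->port[0].cmd_addr = cmd_addr;
	probe_ent->port[0].altstatus_addr = ctl_addr;
	probe_ent->port[0].ctl_addr = ctl_addr;
	ata_std_ports(&probe_ent->port[0]);	/* derive the rest */

	if (!ata_device_add(probe_ent)) {
		kfree(probe_ent);
		return -ENODEV;
	}

	/* probe_ent is only needed during attach */
	kfree(probe_ent);
	return 0;
}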
4636
4637 /**
4638 * ata_host_set_remove - PCI layer callback for device removal
4639 * @host_set: ATA host set that was removed
4640 *
4641 * Unregister all objects associated with this host set. Free those
4642 * objects.
4643 *
4644 * LOCKING:
4645 * Inherited from calling layer (may sleep).
4646 */
4647
4648 void ata_host_set_remove(struct ata_host_set *host_set)
4649 {
4650 struct ata_port *ap;
4651 unsigned int i;
4652
4653 for (i = 0; i < host_set->n_ports; i++) {
4654 ap = host_set->ports[i];
4655 scsi_remove_host(ap->host);
4656 }
4657
4658 free_irq(host_set->irq, host_set);
4659
4660 for (i = 0; i < host_set->n_ports; i++) {
4661 ap = host_set->ports[i];
4662
4663 ata_scsi_release(ap->host);
4664
4665 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4666 struct ata_ioports *ioaddr = &ap->ioaddr;
4667
4668 if (ioaddr->cmd_addr == 0x1f0)
4669 release_region(0x1f0, 8);
4670 else if (ioaddr->cmd_addr == 0x170)
4671 release_region(0x170, 8);
4672 }
4673
4674 scsi_host_put(ap->host);
4675 }
4676
4677 if (host_set->ops->host_stop)
4678 host_set->ops->host_stop(host_set);
4679
4680 kfree(host_set);
4681 }
4682
4683 /**
4684 * ata_scsi_release - SCSI layer callback hook for host unload
4685 * @host: libata host to be unloaded
4686 *
4687 * Performs all duties necessary to shut down a libata port...
4688 * Kill port kthread, disable port, and release resources.
4689 *
4690 * LOCKING:
4691 * Inherited from SCSI layer.
4692 *
4693 * RETURNS:
4694 * One.
4695 */
4696
4697 int ata_scsi_release(struct Scsi_Host *host)
4698 {
4699 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4700 int i;
4701
4702 DPRINTK("ENTER\n");
4703
4704 ap->ops->port_disable(ap);
4705 ata_host_remove(ap, 0);
4706 for (i = 0; i < ATA_MAX_DEVICES; i++)
4707 kfree(ap->device[i].id);
4708
4709 DPRINTK("EXIT\n");
4710 return 1;
4711 }
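
/*
 * Illustrative sketch (hypothetical driver): the scsi_host_template a
 * low-level driver hands to libata, wiring up the exported SCSI hooks.
 * Only the "example" names are made up; the callbacks and constants
 * mirror what real libata drivers of this vintage use.
 */
static struct scsi_host_template example_sht = {
	.module			= THIS_MODULE,
	.name			= "example_ata",
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= "example_ata",
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};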
4712
4713 /**
4714 * ata_std_ports - initialize ioaddr with standard port offsets.
4715 * @ioaddr: IO address structure to be initialized
4716 *
4717 * Utility function which initializes data_addr, error_addr,
4718 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4719 * device_addr, status_addr, and command_addr to standard offsets
4720 * relative to cmd_addr.
4721 *
4722 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4723 */
4724
4725 void ata_std_ports(struct ata_ioports *ioaddr)
4726 {
4727 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4728 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4729 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4730 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4731 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4732 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4733 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4734 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4735 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4736 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4737 }
4738
4739
4740 #ifdef CONFIG_PCI
4741
4742 void ata_pci_host_stop (struct ata_host_set *host_set)
4743 {
4744 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4745
4746 pci_iounmap(pdev, host_set->mmio_base);
4747 }
4748
4749 /**
4750 * ata_pci_remove_one - PCI layer callback for device removal
4751 * @pdev: PCI device that was removed
4752 *
4753 * PCI layer indicates to libata via this hook that
4754 * hot-unplug or module unload event has occurred.
4755 * Handle this by unregistering all objects associated
4756 * with this PCI device. Free those objects. Then finally
4757 * release PCI resources and disable device.
4758 *
4759 * LOCKING:
4760 * Inherited from PCI layer (may sleep).
4761 */
4762
4763 void ata_pci_remove_one (struct pci_dev *pdev)
4764 {
4765 struct device *dev = pci_dev_to_dev(pdev);
4766 struct ata_host_set *host_set = dev_get_drvdata(dev);
4767
4768 ata_host_set_remove(host_set);
4769 pci_release_regions(pdev);
4770 pci_disable_device(pdev);
4771 dev_set_drvdata(dev, NULL);
4772 }
4773
4774 /* move to PCI subsystem */
4775 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4776 {
4777 unsigned long tmp = 0;
4778
4779 switch (bits->width) {
4780 case 1: {
4781 u8 tmp8 = 0;
4782 pci_read_config_byte(pdev, bits->reg, &tmp8);
4783 tmp = tmp8;
4784 break;
4785 }
4786 case 2: {
4787 u16 tmp16 = 0;
4788 pci_read_config_word(pdev, bits->reg, &tmp16);
4789 tmp = tmp16;
4790 break;
4791 }
4792 case 4: {
4793 u32 tmp32 = 0;
4794 pci_read_config_dword(pdev, bits->reg, &tmp32);
4795 tmp = tmp32;
4796 break;
4797 }
4798
4799 default:
4800 return -EINVAL;
4801 }
4802
4803 tmp &= bits->mask;
4804
4805 return (tmp == bits->val) ? 1 : 0;
4806 }
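
/*
 * Illustrative use (hypothetical values): several PATA drivers test a
 * "channel enabled" bit in PCI config space before probing a port.
 * The register offset and mask below are placeholders, not tied to
 * any particular controller.
 */
static int example_channel_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bit = {
		0x41,	/* config space offset */
		1,	/* width in bytes */
		0x80,	/* mask */
		0x80,	/* expected value: bit set means enabled */
	};

	return pci_test_config_bits(pdev, &enable_bit);	/* 1, 0 or -EINVAL */
}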
4807
4808 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4809 {
4810 pci_save_state(pdev);
4811 pci_disable_device(pdev);
4812 pci_set_power_state(pdev, PCI_D3hot);
4813 return 0;
4814 }
4815
4816 int ata_pci_device_resume(struct pci_dev *pdev)
4817 {
4818 pci_set_power_state(pdev, PCI_D0);
4819 pci_restore_state(pdev);
4820 pci_enable_device(pdev);
4821 pci_set_master(pdev);
4822 return 0;
4823 }
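
/*
 * Illustrative sketch (hypothetical driver): the PCI helpers above slot
 * straight into a pci_driver.  example_sht and example_port_ops are the
 * made-up names from the earlier sketches; the vendor/device IDs are
 * placeholders.
 */
static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder IDs */
	{ }				/* terminate list */
};

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	static struct ata_port_info info = {
		.sht		= &example_sht,
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,	/* PIO modes 0-4 */
		.port_ops	= &example_port_ops,
	};
	struct ata_port_info *ppi = &info;

	return ata_pci_init_one(pdev, &ppi, 1);
}

static struct pci_driver example_pci_driver = {
	.name		= "example_ata",
	.id_table	= example_pci_ids,
	.probe		= example_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
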
4824 #endif /* CONFIG_PCI */
4825
4826
4827 static int __init ata_init(void)
4828 {
4829 ata_wq = create_workqueue("ata");
4830 if (!ata_wq)
4831 return -ENOMEM;
4832
4833 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4834 return 0;
4835 }
4836
4837 static void __exit ata_exit(void)
4838 {
4839 destroy_workqueue(ata_wq);
4840 }
4841
4842 module_init(ata_init);
4843 module_exit(ata_exit);
4844
4845 static unsigned long ratelimit_time;
4846 static DEFINE_SPINLOCK(ata_ratelimit_lock);
4847
4848 int ata_ratelimit(void)
4849 {
4850 int rc;
4851 unsigned long flags;
4852
4853 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4854
4855 if (time_after(jiffies, ratelimit_time)) {
4856 rc = 1;
4857 ratelimit_time = jiffies + (HZ/5);
4858 } else
4859 rc = 0;
4860
4861 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4862
4863 return rc;
4864 }
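
/*
 * Illustrative use: gate a potentially floody warning so it fires at
 * most about five times a second (the HZ/5 window above).  The helper
 * name is hypothetical.
 */
static void example_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: spurious interrupt ignored\n",
		       ap->id);
}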
4865
4866 /*
4867 * libata is essentially a library of internal helper functions for
4868 * low-level ATA host controller drivers. As such, the API/ABI is
4869 * likely to change as new drivers are added and updated.
4870 * Do not depend on ABI/API stability.
4871 */
4872
4873 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4874 EXPORT_SYMBOL_GPL(ata_std_ports);
4875 EXPORT_SYMBOL_GPL(ata_device_add);
4876 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4877 EXPORT_SYMBOL_GPL(ata_sg_init);
4878 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4879 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4880 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4881 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4882 EXPORT_SYMBOL_GPL(ata_tf_load);
4883 EXPORT_SYMBOL_GPL(ata_tf_read);
4884 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4885 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4886 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4887 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4888 EXPORT_SYMBOL_GPL(ata_check_status);
4889 EXPORT_SYMBOL_GPL(ata_altstatus);
4890 EXPORT_SYMBOL_GPL(ata_exec_command);
4891 EXPORT_SYMBOL_GPL(ata_port_start);
4892 EXPORT_SYMBOL_GPL(ata_port_stop);
4893 EXPORT_SYMBOL_GPL(ata_host_stop);
4894 EXPORT_SYMBOL_GPL(ata_interrupt);
4895 EXPORT_SYMBOL_GPL(ata_qc_prep);
4896 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4897 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4898 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4899 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4900 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4901 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4902 EXPORT_SYMBOL_GPL(ata_port_probe);
4903 EXPORT_SYMBOL_GPL(sata_phy_reset);
4904 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4905 EXPORT_SYMBOL_GPL(ata_bus_reset);
4906 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4907 EXPORT_SYMBOL_GPL(ata_std_softreset);
4908 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4909 EXPORT_SYMBOL_GPL(ata_std_postreset);
4910 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4911 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4912 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4913 EXPORT_SYMBOL_GPL(ata_port_disable);
4914 EXPORT_SYMBOL_GPL(ata_ratelimit);
4915 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4916 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4917 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4918 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4919 EXPORT_SYMBOL_GPL(ata_scsi_error);
4920 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4921 EXPORT_SYMBOL_GPL(ata_scsi_release);
4922 EXPORT_SYMBOL_GPL(ata_host_intr);
4923 EXPORT_SYMBOL_GPL(ata_dev_classify);
4924 EXPORT_SYMBOL_GPL(ata_id_string);
4925 EXPORT_SYMBOL_GPL(ata_id_c_string);
4926 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4927 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4928 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4929
4930 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4931 EXPORT_SYMBOL_GPL(ata_timing_compute);
4932 EXPORT_SYMBOL_GPL(ata_timing_merge);
4933
4934 #ifdef CONFIG_PCI
4935 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4936 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4937 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4938 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4939 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4940 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4941 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4942 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4943 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4944 #endif /* CONFIG_PCI */
4945
4946 EXPORT_SYMBOL_GPL(ata_device_suspend);
4947 EXPORT_SYMBOL_GPL(ata_device_resume);
4948 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4949 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);