[PATCH] libata: make ata_set_mode() handle no-device case properly
drivers/scsi/libata-core.c
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev,
					u16 heads,
					u16 sectors);
static void ata_set_mode(struct ata_port *ap);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
					 struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
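
/*
 * Editor's sketch (illustrative, not part of the original file): how a
 * caller might serialize a taskfile into the 20-byte Register - Host to
 * Device FIS that ata_tf_to_fis() emits.  The buffer size and the 0x27
 * type byte follow directly from the function above.
 */
static void example_build_h2d_fis(const struct ata_taskfile *tf)
{
	u8 fis[20];	/* Register - Host to Device FIS is 20 bytes */

	/* pmp is 0 on hosts without a port multiplier */
	ata_tf_to_fis(tf, fis, 0);
	/* fis[] now begins with 0x27 and carries tf's command, LBA,
	 * sector count and control values, ready to hand to hardware.
	 */
}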

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
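
/*
 * Editor's worked example (illustrative): for an LBA48 FUA write on a
 * DMA-capable device, the index above is 16 (dma) + 4 (fua) + 2 (lba48)
 * + 1 (write) = 23, selecting ATA_CMD_WRITE_FUA_EXT from ata_rw_cmds[];
 * a plain PIO read without multi_count uses 8 + 0 + 0 + 0 = 8,
 * i.e. ATA_CMD_PIO_READ.
 */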

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
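
/*
 * Editor's sketch (illustrative, not part of the original file): the
 * pack/unpack helpers above are exact inverses, so a combined xfer_mask
 * can always be split back into its per-type masks without loss.
 */
static void example_xfermask_roundtrip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	/* PIO0-4, MWDMA0-2 and UDMA0-5 */
	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}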

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
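
/*
 * Editor's sketch (illustrative, not part of the original file):
 * reporting the best mode a mask allows.  With UDMA5 as the highest set
 * bit, ata_xfer_mask2mode() returns XFER_UDMA_5 and ata_mode_string()
 * yields "UDMA/100".
 */
static void example_report_best_mode(unsigned int xfer_mask)
{
	u8 mode = ata_xfer_mask2mode(xfer_mask);

	printk(KERN_DEBUG "best xfer mode 0x%x (%s)\n",
	       mode, ata_mode_string(xfer_mask));
}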

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		printk(KERN_WARNING "ata%u: dev %u disabled\n",
		       ap->id, dev->devno);
		/* bump class to its _UNSUP variant, which
		 * ata_dev_enabled() treats as absent
		 */
		dev->class++;
	}
}

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
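
/*
 * Editor's sketch (illustrative, not part of the original file):
 * extracting the model string from IDENTIFY data.  The 40-byte model
 * field starts at word 27 (ATA_ID_PROD_OFS in this kernel's
 * <linux/ata.h>); the +1 leaves room for the '\0' that
 * ata_id_c_string() appends, and also keeps @len odd as required.
 */
static void example_print_model(const u16 *id)
{
	unsigned char model[40 + 1];

	ata_id_c_string(id, model, 27, sizeof(model));
	printk(KERN_DEBUG "model: %s\n", model);
}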

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action (it is a no-op).
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However, it's the speeds, not the modes, that
		 * are supported...  Note that drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
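
/*
 * Editor's worked example (illustrative): a drive reporting word 53
 * bit 1 set, word 64 = 0x0003 (PIO3-4), word 63 = 0x0007 (MWDMA0-2) and
 * word 88 = 0x003f (UDMA0-5) yields pio_mask = (0x03 << 3) | 0x7 = 0x1f,
 * mwdma_mask = 0x07 and udma_mask = 0x3f, which ata_pack_xfermask()
 * combines into the returned xfer_mask.
 */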

/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (low level driver's) responsibility to make sure that
 *	only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	return err_mask;
}
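
/*
 * Editor's sketch (illustrative, not part of the original file): a
 * typical no-data internal command, mirroring what the
 * ata_dev_set_xfermode() helper declared above does with
 * SET FEATURES - XFER MODE.  The returned err_mask tells the caller
 * whether the command stuck.
 */
static unsigned int example_set_xfermode(struct ata_port *ap,
					 struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;	/* desired XFER_* value */

	return ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
}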

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240 ns per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
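
/*
 * Editor's worked example (illustrative): for a drive running PIO2
 * whose word 67 (ATA_ID_EIDE_PIO, minimum PIO cycle time without
 * IORDY) reads 120 ns, the drive is comfortable at PIO2's 240 ns cycle
 * without flow control and ata_pio_need_iordy() returns 0; had it read
 * 300 ns, a 240 ns cycle would need IORDY and the result would be 1.
 */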

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}

static inline u8 ata_dev_knobble(const struct ata_port *ap,
				 struct ata_device *dev)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_enabled(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	DPRINTK("EXIT, err\n");
	return rc;
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk(KERN_ERR "ata%u: reset failed (errno=%d)\n",
			       ap->id, rc);
			return rc;
		}
	} else {
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			ata_dev_disable(ap, dev);
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	if (ap->ops->set_mode)
		ap->ops->set_mode(ap);
	else
		ata_set_mode(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -ENODEV;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		printk(KERN_INFO "ata%u: SATA link up %s (SStatus %X)\n",
		       ap->id, sata_spd_string(tmp), sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_dev_pair - return other device on cable
 *	@ap: port
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if none is
 *	present.
 */

struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

/*	{ XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)	(((v)-1)/(unit)+1)
#define EZ(v,unit)	((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
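
/*
 * Editor's worked example (illustrative): with T = 30000, the period in
 * picoseconds of a 33 MHz bus clock, a 70 ns setup time quantizes as
 * EZ(70 * 1000, 30000) = ENOUGH(70000, 30000) = (70000 - 1) / 30000 + 1
 * = 3 clocks; the ceiling division guarantees the programmed timing is
 * never shorter than the mode requires.
 */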

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma, b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
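
/*
 * Editor's sketch (illustrative, not part of the original file): how a
 * controller driver's ->set_piomode() hook might use
 * ata_timing_compute().  The 30000 ps clock period assumes a 33 MHz bus
 * clock; real drivers derive T and UT from their own clocking.
 */
static void example_program_pio(struct ata_device *adev, u8 pio_mode)
{
	struct ata_timing t;

	if (ata_timing_compute(adev, pio_mode, &t, 30000, 30000) < 0)
		return;		/* unknown mode */

	/* t.setup, t.active, t.recover etc. are now in bus clocks,
	 * ready to be written to the controller's timing registers.
	 */
}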

static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(ap, dev);
	if (err_mask) {
		printk(KERN_ERR
		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
		       ap->id, err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(ap, dev, 0);
	if (rc) {
		printk(KERN_ERR
		       "ata%u: failed to revalidate after set xfermode\n",
		       ap->id);
		return rc;
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

static int ata_host_set_pio(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
			return -1;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(ap, dev);

		/* TODO: let LLDD filter dev->*_mask here */

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		return;

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(ap, dev);
		if (rc)
			goto err_out;
	}

	/*
	 * Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */

	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
		ap->host_set->simplex_claimed = 1;

	/*
	 * Chip specific finalisation
	 */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2 ms rule and then waits for ready.
2035 */
2036 msleep(150);
2037
2038 /* Before we perform post reset processing we want to see if
2039 * the bus shows 0xFF because the odd clown forgets the D7
2040 * pulldown resistor.
2041 */
2042 if (ata_check_status(ap) == 0xFF)
2043 return AC_ERR_OTHER;
2044
2045 ata_bus_post_reset(ap, devmask);
2046
2047 return 0;
2048 }
2049
2050 /**
2051 * ata_bus_reset - reset host port and associated ATA channel
2052 * @ap: port to reset
2053 *
2054 * This is typically the first time we actually start issuing
2055 * commands to the ATA channel. We wait for BSY to clear, then
2056 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2057 * result. Determine what devices, if any, are on the channel
2058 * by looking at the device 0/1 error register. Look at the signature
2059 * stored in each device's taskfile registers, to determine if
2060 * the device is ATA or ATAPI.
2061 *
2062 * LOCKING:
2063 * PCI/etc. bus probe sem.
2064 * Obtains host_set lock.
2065 *
2066 * SIDE EFFECTS:
2067 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2068 */
2069
2070 void ata_bus_reset(struct ata_port *ap)
2071 {
2072 struct ata_ioports *ioaddr = &ap->ioaddr;
2073 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2074 u8 err;
2075 unsigned int dev0, dev1 = 0, devmask = 0;
2076
2077 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2078
2079 /* determine if device 0/1 are present */
2080 if (ap->flags & ATA_FLAG_SATA_RESET)
2081 dev0 = 1;
2082 else {
2083 dev0 = ata_devchk(ap, 0);
2084 if (slave_possible)
2085 dev1 = ata_devchk(ap, 1);
2086 }
2087
2088 if (dev0)
2089 devmask |= (1 << 0);
2090 if (dev1)
2091 devmask |= (1 << 1);
2092
2093 /* select device 0 again */
2094 ap->ops->dev_select(ap, 0);
2095
2096 /* issue bus reset */
2097 if (ap->flags & ATA_FLAG_SRST)
2098 if (ata_bus_softreset(ap, devmask))
2099 goto err_out;
2100
2101 /*
2102 * determine by signature whether we have ATA or ATAPI devices
2103 */
2104 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2105 if ((slave_possible) && (err != 0x81))
2106 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2107
2108 /* re-enable interrupts */
2109 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2110 ata_irq_on(ap);
2111
2112 /* is double-select really necessary? */
2113 if (ap->device[1].class != ATA_DEV_NONE)
2114 ap->ops->dev_select(ap, 1);
2115 if (ap->device[0].class != ATA_DEV_NONE)
2116 ap->ops->dev_select(ap, 0);
2117
2118 /* if no devices were detected, disable this port */
2119 if ((ap->device[0].class == ATA_DEV_NONE) &&
2120 (ap->device[1].class == ATA_DEV_NONE))
2121 goto err_out;
2122
2123 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2124 /* set up device control for ATA_FLAG_SATA_RESET */
2125 if (ap->flags & ATA_FLAG_MMIO)
2126 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2127 else
2128 outb(ap->ctl, ioaddr->ctl_addr);
2129 }
2130
2131 DPRINTK("EXIT\n");
2132 return;
2133
2134 err_out:
2135 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2136 ap->ops->port_disable(ap);
2137
2138 DPRINTK("EXIT\n");
2139 }
2140
2141 static int sata_phy_resume(struct ata_port *ap)
2142 {
2143 unsigned long timeout = jiffies + (HZ * 5);
2144 u32 sstatus;
2145
2146 scr_write_flush(ap, SCR_CONTROL, 0x300);
2147
2148 /* Wait for phy to become ready, if necessary. */
2149 do {
2150 msleep(200);
2151 sstatus = scr_read(ap, SCR_STATUS);
2152 if ((sstatus & 0xf) != 1)
2153 return 0;
2154 } while (time_before(jiffies, timeout));
2155
2156 return -1;
2157 }
2158
2159 /**
2160 * ata_std_probeinit - initialize probing
2161 * @ap: port to be probed
2162 *
2163 * @ap is about to be probed. Initialize it. This function is
2164 * to be used as standard callback for ata_drive_probe_reset().
2165 *
2166 * NOTE!!! Do not use this function as probeinit if a low level
2167 * driver implements only hardreset. Just pass NULL as probeinit
2168 * in that case. Using this function is probably okay, but doing
2169 * so makes the reset sequence different from the original
2170 * ->phy_reset implementation and makes Jeff nervous. :-P
2171 */
2172 void ata_std_probeinit(struct ata_port *ap)
2173 {
2174 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2175 sata_phy_resume(ap);
2176 if (sata_dev_present(ap))
2177 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2178 }
2179 }
2180
2181 /**
2182 * ata_std_softreset - reset host port via ATA SRST
2183 * @ap: port to reset
2184 * @verbose: fail verbosely
2185 * @classes: resulting classes of attached devices
2186 *
2187 * Reset host port using ATA SRST. This function is to be used
2188 * as standard callback for ata_drive_*_reset() functions.
2189 *
2190 * LOCKING:
2191 * Kernel thread context (may sleep)
2192 *
2193 * RETURNS:
2194 * 0 on success, -errno otherwise.
2195 */
2196 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2197 {
2198 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2199 unsigned int devmask = 0, err_mask;
2200 u8 err;
2201
2202 DPRINTK("ENTER\n");
2203
2204 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2205 classes[0] = ATA_DEV_NONE;
2206 goto out;
2207 }
2208
2209 /* determine if device 0/1 are present */
2210 if (ata_devchk(ap, 0))
2211 devmask |= (1 << 0);
2212 if (slave_possible && ata_devchk(ap, 1))
2213 devmask |= (1 << 1);
2214
2215 /* select device 0 again */
2216 ap->ops->dev_select(ap, 0);
2217
2218 /* issue bus reset */
2219 DPRINTK("about to softreset, devmask=%x\n", devmask);
2220 err_mask = ata_bus_softreset(ap, devmask);
2221 if (err_mask) {
2222 if (verbose)
2223 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2224 ap->id, err_mask);
2225 else
2226 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2227 err_mask);
2228 return -EIO;
2229 }
2230
2231 /* determine by signature whether we have ATA or ATAPI devices */
2232 classes[0] = ata_dev_try_classify(ap, 0, &err);
2233 if (slave_possible && err != 0x81)
2234 classes[1] = ata_dev_try_classify(ap, 1, &err);
2235
2236 out:
2237 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2238 return 0;
2239 }
2240
2241 /**
2242 * sata_std_hardreset - reset host port via SATA phy reset
2243 * @ap: port to reset
2244 * @verbose: fail verbosely
2245 * @class: resulting class of attached device
2246 *
2247 * SATA phy-reset host port using DET bits of SControl register.
2248 * This function is to be used as standard callback for
2249 * ata_drive_*_reset().
2250 *
2251 * LOCKING:
2252 * Kernel thread context (may sleep)
2253 *
2254 * RETURNS:
2255 * 0 on success, -errno otherwise.
2256 */
2257 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2258 {
2259 DPRINTK("ENTER\n");
2260
2261 /* Issue phy wake/reset */
2262 scr_write_flush(ap, SCR_CONTROL, 0x301);
2263
2264 /*
2265 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2266 * 10.4.2 says at least 1 ms.
2267 */
2268 msleep(1);
2269
2270 /* Bring phy back */
2271 sata_phy_resume(ap);
2272
2273 /* TODO: phy layer with polling, timeouts, etc. */
2274 if (!sata_dev_present(ap)) {
2275 *class = ATA_DEV_NONE;
2276 DPRINTK("EXIT, link offline\n");
2277 return 0;
2278 }
2279
2280 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2281 if (verbose)
2282 printk(KERN_ERR "ata%u: COMRESET failed "
2283 "(device not ready)\n", ap->id);
2284 else
2285 DPRINTK("EXIT, device not ready\n");
2286 return -EIO;
2287 }
2288
2289 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2290
2291 *class = ata_dev_try_classify(ap, 0, NULL);
2292
2293 DPRINTK("EXIT, class=%u\n", *class);
2294 return 0;
2295 }
2296
2297 /**
2298 * ata_std_postreset - standard postreset callback
2299 * @ap: the target ata_port
2300 * @classes: classes of attached devices
2301 *
2302 * This function is invoked after a successful reset. Note that
2303 * the device might have been reset more than once using
2304 * different reset methods before postreset is invoked.
2305 *
2306 * This function is to be used as standard callback for
2307 * ata_drive_*_reset().
2308 *
2309 * LOCKING:
2310 * Kernel thread context (may sleep)
2311 */
2312 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2313 {
2314 DPRINTK("ENTER\n");
2315
2316 /* set cable type if it isn't already set */
2317 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2318 ap->cbl = ATA_CBL_SATA;
2319
2320 /* print link status */
2321 if (ap->cbl == ATA_CBL_SATA)
2322 sata_print_link_status(ap);
2323
2324 /* re-enable interrupts */
2325 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2326 ata_irq_on(ap);
2327
2328 /* is double-select really necessary? */
2329 if (classes[1] != ATA_DEV_NONE)
2330 ap->ops->dev_select(ap, 1);
2331 if (classes[0] != ATA_DEV_NONE)
2332 ap->ops->dev_select(ap, 0);
2333
2334 /* bail out if no device is present */
2335 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2336 DPRINTK("EXIT, no device\n");
2337 return;
2338 }
2339
2340 /* set up device control */
2341 if (ap->ioaddr.ctl_addr) {
2342 if (ap->flags & ATA_FLAG_MMIO)
2343 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2344 else
2345 outb(ap->ctl, ap->ioaddr.ctl_addr);
2346 }
2347
2348 DPRINTK("EXIT\n");
2349 }
2350
2351 /**
2352 * ata_std_probe_reset - standard probe reset method
2353 * @ap: port on which to perform probe-reset
2354 * @classes: resulting classes of attached devices
2355 *
2356 * The stock off-the-shelf ->probe_reset method.
2357 *
2358 * LOCKING:
2359 * Kernel thread context (may sleep)
2360 *
2361 * RETURNS:
2362 * 0 on success, -errno otherwise.
2363 */
2364 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2365 {
2366 ata_reset_fn_t hardreset;
2367
2368 hardreset = NULL;
2369 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2370 hardreset = sata_std_hardreset;
2371
2372 return ata_drive_probe_reset(ap, ata_std_probeinit,
2373 ata_std_softreset, hardreset,
2374 ata_std_postreset, classes);
2375 }
2376
2377 static int ata_do_reset(struct ata_port *ap,
2378 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
2379 int verbose, unsigned int *classes)
2380 {
2381 int i, rc;
2382
2383 for (i = 0; i < ATA_MAX_DEVICES; i++)
2384 classes[i] = ATA_DEV_UNKNOWN;
2385
2386 rc = reset(ap, verbose, classes);
2387 if (rc)
2388 return rc;
2389
2390 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2391 * is complete and convert all ATA_DEV_UNKNOWN to
2392 * ATA_DEV_NONE.
2393 */
2394 for (i = 0; i < ATA_MAX_DEVICES; i++)
2395 if (classes[i] != ATA_DEV_UNKNOWN)
2396 break;
2397
2398 if (i < ATA_MAX_DEVICES)
2399 for (i = 0; i < ATA_MAX_DEVICES; i++)
2400 if (classes[i] == ATA_DEV_UNKNOWN)
2401 classes[i] = ATA_DEV_NONE;
2402
2403 if (postreset)
2404 postreset(ap, classes);
2405
2406 return 0;
2407 }
2408
2409 /**
2410 * ata_drive_probe_reset - Perform probe reset with given methods
2411 * @ap: port to reset
2412 * @probeinit: probeinit method (can be NULL)
2413 * @softreset: softreset method (can be NULL)
2414 * @hardreset: hardreset method (can be NULL)
2415 * @postreset: postreset method (can be NULL)
2416 * @classes: resulting classes of attached devices
2417 *
2418 * Reset the specified port and classify attached devices using
2419 * given methods. This function prefers softreset but tries all
2420 * possible reset sequences to reset and classify devices. This
2421 * function is intended to be used for constructing ->probe_reset
2422 * callback by low level drivers.
2423 *
2424 * Reset methods should follow the following rules.
2425 *
2426 * - Return 0 on success, -errno on failure.
2427 * - If classification is supported, fill classes[] with
2428 * recognized class codes.
2429 * - If classification is not supported, leave classes[] alone.
2430 * - If verbose is non-zero, print error message on failure;
2431 * otherwise, shut up.
2432 *
2433 * LOCKING:
2434 * Kernel thread context (may sleep)
2435 *
2436 * RETURNS:
2437 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2438 * if classification fails, and any error code from reset
2439 * methods.
2440 */
2441 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2442 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2443 ata_postreset_fn_t postreset, unsigned int *classes)
2444 {
2445 int rc = -EINVAL;
2446
2447 if (probeinit)
2448 probeinit(ap);
2449
2450 if (softreset) {
2451 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2452 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2453 goto done;
2454 }
2455
2456 if (!hardreset)
2457 goto done;
2458
2459 rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
2460 if (rc || classes[0] != ATA_DEV_UNKNOWN)
2461 goto done;
2462
2463 if (softreset)
2464 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2465
2466 done:
2467 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
2468 rc = -ENODEV;
2469 return rc;
2470 }
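/* Example (illustrative, not part of the original source): a
 * hypothetical low-level driver whose hardware implements only
 * hardreset would build its ->probe_reset on ata_drive_probe_reset(),
 * passing NULL probeinit as the note above ata_std_probeinit()
 * advises:
 *
 *	static int foo_probe_reset(struct ata_port *ap,
 *				   unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, NULL,
 *					     NULL, sata_std_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */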
2471
2472 /**
2473 * ata_dev_same_device - Determine whether new ID matches configured device
2474 * @ap: port on which the device to compare against resides
2475 * @dev: device to compare against
2476 * @new_class: class of the new device
2477 * @new_id: IDENTIFY page of the new device
2478 *
2479 * Compare @new_class and @new_id against @dev and determine
2480 * whether @dev is the device indicated by @new_class and
2481 * @new_id.
2482 *
2483 * LOCKING:
2484 * None.
2485 *
2486 * RETURNS:
2487 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2488 */
2489 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2490 unsigned int new_class, const u16 *new_id)
2491 {
2492 const u16 *old_id = dev->id;
2493 unsigned char model[2][41], serial[2][21];
2494 u64 new_n_sectors;
2495
2496 if (dev->class != new_class) {
2497 printk(KERN_INFO
2498 "ata%u: dev %u class mismatch %d != %d\n",
2499 ap->id, dev->devno, dev->class, new_class);
2500 return 0;
2501 }
2502
2503 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2504 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2505 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2506 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2507 new_n_sectors = ata_id_n_sectors(new_id);
2508
2509 if (strcmp(model[0], model[1])) {
2510 printk(KERN_INFO
2511 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2512 ap->id, dev->devno, model[0], model[1]);
2513 return 0;
2514 }
2515
2516 if (strcmp(serial[0], serial[1])) {
2517 printk(KERN_INFO
2518 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2519 ap->id, dev->devno, serial[0], serial[1]);
2520 return 0;
2521 }
2522
2523 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2524 printk(KERN_INFO
2525 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2526 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2527 (unsigned long long)new_n_sectors);
2528 return 0;
2529 }
2530
2531 return 1;
2532 }
2533
2534 /**
2535 * ata_dev_revalidate - Revalidate ATA device
2536 * @ap: port on which the device to revalidate resides
2537 * @dev: device to revalidate
2538 * @post_reset: is this revalidation after reset?
2539 *
2540 * Re-read IDENTIFY page and make sure @dev is still attached to
2541 * the port.
2542 *
2543 * LOCKING:
2544 * Kernel thread context (may sleep)
2545 *
2546 * RETURNS:
2547 * 0 on success, negative errno otherwise
2548 */
2549 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2550 int post_reset)
2551 {
2552 unsigned int class;
2553 u16 *id;
2554 int rc;
2555
2556 if (!ata_dev_enabled(dev))
2557 return -ENODEV;
2558
2559 class = dev->class;
2560 id = NULL;
2561
2562 /* allocate & read ID data */
2563 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2564 if (rc)
2565 goto fail;
2566
2567 /* is the device still there? */
2568 if (!ata_dev_same_device(ap, dev, class, id)) {
2569 rc = -ENODEV;
2570 goto fail;
2571 }
2572
2573 kfree(dev->id);
2574 dev->id = id;
2575
2576 /* configure device according to the new ID */
2577 return ata_dev_configure(ap, dev, 0);
2578
2579 fail:
2580 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2581 ap->id, dev->devno, rc);
2582 kfree(id);
2583 return rc;
2584 }
2585
2586 static const char * const ata_dma_blacklist [] = {
2587 "WDC AC11000H", NULL,
2588 "WDC AC22100H", NULL,
2589 "WDC AC32500H", NULL,
2590 "WDC AC33100H", NULL,
2591 "WDC AC31600H", NULL,
2592 "WDC AC32100H", "24.09P07",
2593 "WDC AC23200L", "21.10N21",
2594 "Compaq CRD-8241B", NULL,
2595 "CRD-8400B", NULL,
2596 "CRD-8480B", NULL,
2597 "CRD-8482B", NULL,
2598 "CRD-84", NULL,
2599 "SanDisk SDP3B", NULL,
2600 "SanDisk SDP3B-64", NULL,
2601 "SANYO CD-ROM CRD", NULL,
2602 "HITACHI CDR-8", NULL,
2603 "HITACHI CDR-8335", NULL,
2604 "HITACHI CDR-8435", NULL,
2605 "Toshiba CD-ROM XM-6202B", NULL,
2606 "TOSHIBA CD-ROM XM-1702BC", NULL,
2607 "CD-532E-A", NULL,
2608 "E-IDE CD-ROM CR-840", NULL,
2609 "CD-ROM Drive/F5A", NULL,
2610 "WPI CDD-820", NULL,
2611 "SAMSUNG CD-ROM SC-148C", NULL,
2612 "SAMSUNG CD-ROM SC", NULL,
2613 "SanDisk SDP3B-64", NULL,
2614 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2615 "_NEC DV5800A", NULL,
2616 "SAMSUNG CD-ROM SN-124", "N001"
2617 };
2618
2619 static int ata_strim(char *s, size_t len)
2620 {
2621 len = strnlen(s, len);
2622
2623 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2624 while ((len > 0) && (s[len - 1] == ' ')) {
2625 len--;
2626 s[len] = 0;
2627 }
2628 return len;
2629 }
2630
2631 static int ata_dma_blacklisted(const struct ata_device *dev)
2632 {
2633 unsigned char model_num[40];
2634 unsigned char model_rev[16];
2635 unsigned int nlen, rlen;
2636 int i;
2637
2638 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2639 sizeof(model_num));
2640 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2641 sizeof(model_rev));
2642 nlen = ata_strim(model_num, sizeof(model_num));
2643 rlen = ata_strim(model_rev, sizeof(model_rev));
2644
2645 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2646 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2647 if (ata_dma_blacklist[i+1] == NULL)
2648 return 1;
2649 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2650 return 1;
2651 }
2652 }
2653 return 0;
2654 }
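/* Example (illustrative): reading the (model, revision) pairs above,
 * "WDC AC32100H" is blacklisted only when running firmware revision
 * "24.09P07", while entries with a NULL revision, e.g. "SanDisk SDP3B",
 * have DMA disabled regardless of firmware revision.
 */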
2655
2656 /**
2657 * ata_dev_xfermask - Compute supported xfermask of the given device
2658 * @ap: Port on which the device to compute xfermask for resides
2659 * @dev: Device to compute xfermask for
2660 *
2661 * Compute supported xfermask of @dev and store it in
2662 * dev->*_mask. This function is responsible for applying all
2663 * known limits including host controller limits, device
2664 * blacklist, etc...
2665 *
2666 * FIXME: The current implementation limits all transfer modes to
2667 * the fastest mode of the slowest device on the port. This is
2668 * not required on most controllers.
2669 *
2670 * LOCKING:
2671 * None.
2672 */
2673 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2674 {
2675 struct ata_host_set *hs = ap->host_set;
2676 unsigned long xfer_mask;
2677 int i;
2678
2679 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2680 ap->udma_mask);
2681
2682 /* FIXME: Use port-wide xfermask for now */
2683 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2684 struct ata_device *d = &ap->device[i];
2685 if (!ata_dev_enabled(d))
2686 continue;
2687 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2688 d->udma_mask);
2689 xfer_mask &= ata_id_xfermask(d->id);
2690 if (ata_dma_blacklisted(d))
2691 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2692 /* Apply the cable rule here. Don't apply it early because, when
2693 we handle hot plug, the cable type can itself change. */
2694 if (ap->cbl == ATA_CBL_PATA40)
2695 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2696 }
2697
2698 if (ata_dma_blacklisted(dev))
2699 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2700 "disabling DMA\n", ap->id, dev->devno);
2701
2702 if (hs->flags & ATA_HOST_SIMPLEX) {
2703 if (hs->simplex_claimed)
2704 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2705 }
2706 if (ap->ops->mode_filter)
2707 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2708
2709 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2710 &dev->udma_mask);
2711 }
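/* Example (illustrative): a hypothetical ->mode_filter hook for the
 * call above; the signature is assumed to match that call. The hook
 * receives the packed mask built by ata_pack_xfermask() and returns a
 * (possibly reduced) mask; here an imaginary controller that cannot
 * do Multiword DMA drops those modes:
 *
 *	static unsigned long foo_mode_filter(struct ata_port *ap,
 *					     struct ata_device *dev,
 *					     unsigned long xfer_mask)
 *	{
 *		return xfer_mask & ~ATA_MASK_MWDMA;
 *	}
 */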
2712
2713 /**
2714 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2715 * @ap: Port associated with device @dev
2716 * @dev: Device to which command will be sent
2717 *
2718 * Issue SET FEATURES - XFER MODE command to device @dev
2719 * on port @ap.
2720 *
2721 * LOCKING:
2722 * PCI/etc. bus probe sem.
2723 *
2724 * RETURNS:
2725 * 0 on success, AC_ERR_* mask otherwise.
2726 */
2727
2728 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2729 struct ata_device *dev)
2730 {
2731 struct ata_taskfile tf;
2732 unsigned int err_mask;
2733
2734 /* set up set-features taskfile */
2735 DPRINTK("set features - xfer mode\n");
2736
2737 ata_tf_init(ap, &tf, dev->devno);
2738 tf.command = ATA_CMD_SET_FEATURES;
2739 tf.feature = SETFEATURES_XFER;
2740 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2741 tf.protocol = ATA_PROT_NODATA;
2742 tf.nsect = dev->xfer_mode;
2743
2744 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2745
2746 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2747 return err_mask;
2748 }
2749
2750 /**
2751 * ata_dev_init_params - Issue INIT DEV PARAMS command
2752 * @ap: Port associated with device @dev
2753 * @dev: Device to which command will be sent
2754 *
2755 * LOCKING:
2756 * Kernel thread context (may sleep)
2757 *
2758 * RETURNS:
2759 * 0 on success, AC_ERR_* mask otherwise.
2760 */
2761
2762 static unsigned int ata_dev_init_params(struct ata_port *ap,
2763 struct ata_device *dev,
2764 u16 heads,
2765 u16 sectors)
2766 {
2767 struct ata_taskfile tf;
2768 unsigned int err_mask;
2769
2770 /* Number of sectors per track 1-255. Number of heads 1-16 */
2771 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2772 return AC_ERR_INVALID;
2773
2774 /* set up init dev params taskfile */
2775 DPRINTK("init dev params \n");
2776
2777 ata_tf_init(ap, &tf, dev->devno);
2778 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2779 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2780 tf.protocol = ATA_PROT_NODATA;
2781 tf.nsect = sectors;
2782 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2783
2784 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2785
2786 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2787 return err_mask;
2788 }
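/* Example (illustrative): for the classic translated CHS geometry of
 * 16 heads and 63 sectors per track, a caller would issue
 *
 *	err_mask = ata_dev_init_params(ap, dev, 16, 63);
 *
 * which encodes max head 15 (heads - 1) into tf.device and 63 into
 * tf.nsect, per the taskfile setup above.
 */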
2789
2790 /**
2791 * ata_sg_clean - Unmap DMA memory associated with command
2792 * @qc: Command containing DMA memory to be released
2793 *
2794 * Unmap all mapped DMA memory associated with this command.
2795 *
2796 * LOCKING:
2797 * spin_lock_irqsave(host_set lock)
2798 */
2799
2800 static void ata_sg_clean(struct ata_queued_cmd *qc)
2801 {
2802 struct ata_port *ap = qc->ap;
2803 struct scatterlist *sg = qc->__sg;
2804 int dir = qc->dma_dir;
2805 void *pad_buf = NULL;
2806
2807 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2808 WARN_ON(sg == NULL);
2809
2810 if (qc->flags & ATA_QCFLAG_SINGLE)
2811 WARN_ON(qc->n_elem > 1);
2812
2813 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2814
2815 /* if we padded the buffer out to a 32-bit boundary, and data
2816 * xfer direction is from-device, we must copy from the
2817 * pad buffer back into the supplied buffer
2818 */
2819 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2820 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2821
2822 if (qc->flags & ATA_QCFLAG_SG) {
2823 if (qc->n_elem)
2824 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2825 /* restore last sg */
2826 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2827 if (pad_buf) {
2828 struct scatterlist *psg = &qc->pad_sgent;
2829 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2830 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2831 kunmap_atomic(addr, KM_IRQ0);
2832 }
2833 } else {
2834 if (qc->n_elem)
2835 dma_unmap_single(ap->dev,
2836 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2837 dir);
2838 /* restore sg */
2839 sg->length += qc->pad_len;
2840 if (pad_buf)
2841 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2842 pad_buf, qc->pad_len);
2843 }
2844
2845 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2846 qc->__sg = NULL;
2847 }
2848
2849 /**
2850 * ata_fill_sg - Fill PCI IDE PRD table
2851 * @qc: Metadata associated with taskfile to be transferred
2852 *
2853 * Fill PCI IDE PRD (scatter-gather) table with segments
2854 * associated with the current disk command.
2855 *
2856 * LOCKING:
2857 * spin_lock_irqsave(host_set lock)
2858 *
2859 */
2860 static void ata_fill_sg(struct ata_queued_cmd *qc)
2861 {
2862 struct ata_port *ap = qc->ap;
2863 struct scatterlist *sg;
2864 unsigned int idx;
2865
2866 WARN_ON(qc->__sg == NULL);
2867 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2868
2869 idx = 0;
2870 ata_for_each_sg(sg, qc) {
2871 u32 addr, offset;
2872 u32 sg_len, len;
2873
2874 /* determine if physical DMA addr spans 64K boundary.
2875 * Note h/w doesn't support 64-bit, so we unconditionally
2876 * truncate dma_addr_t to u32.
2877 */
2878 addr = (u32) sg_dma_address(sg);
2879 sg_len = sg_dma_len(sg);
2880
2881 while (sg_len) {
2882 offset = addr & 0xffff;
2883 len = sg_len;
2884 if ((offset + sg_len) > 0x10000)
2885 len = 0x10000 - offset;
2886
2887 ap->prd[idx].addr = cpu_to_le32(addr);
2888 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2889 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2890
2891 idx++;
2892 sg_len -= len;
2893 addr += len;
2894 }
2895 }
2896
2897 if (idx)
2898 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2899 }
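/* Example (illustrative): a segment at bus address 0x1f800 with
 * length 0x1000 crosses a 64K boundary, so the loop above emits two
 * PRD entries:
 *
 *	PRD[0] = (0x1f800, 0x0800)	up to the 64K boundary
 *	PRD[1] = (0x20000, 0x0800)	the remainder
 */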
2900 /**
2901 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2902 * @qc: Metadata associated with taskfile to check
2903 *
2904 * Allow low-level driver to filter ATA PACKET commands, returning
2905 * a status indicating whether or not it is OK to use DMA for the
2906 * supplied PACKET command.
2907 *
2908 * LOCKING:
2909 * spin_lock_irqsave(host_set lock)
2910 *
2911 * RETURNS: 0 when ATAPI DMA can be used
2912 * nonzero otherwise
2913 */
2914 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2915 {
2916 struct ata_port *ap = qc->ap;
2917 int rc = 0; /* Assume ATAPI DMA is OK by default */
2918
2919 if (ap->ops->check_atapi_dma)
2920 rc = ap->ops->check_atapi_dma(qc);
2921
2922 return rc;
2923 }
2924 /**
2925 * ata_qc_prep - Prepare taskfile for submission
2926 * @qc: Metadata associated with taskfile to be prepared
2927 *
2928 * Prepare ATA taskfile for submission.
2929 *
2930 * LOCKING:
2931 * spin_lock_irqsave(host_set lock)
2932 */
2933 void ata_qc_prep(struct ata_queued_cmd *qc)
2934 {
2935 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2936 return;
2937
2938 ata_fill_sg(qc);
2939 }
2940
2941 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2942
2943 /**
2944 * ata_sg_init_one - Associate command with memory buffer
2945 * @qc: Command to be associated
2946 * @buf: Memory buffer
2947 * @buflen: Length of memory buffer, in bytes.
2948 *
2949 * Initialize the data-related elements of queued_cmd @qc
2950 * to point to a single memory buffer, @buf of byte length @buflen.
2951 *
2952 * LOCKING:
2953 * spin_lock_irqsave(host_set lock)
2954 */
2955
2956 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2957 {
2958 struct scatterlist *sg;
2959
2960 qc->flags |= ATA_QCFLAG_SINGLE;
2961
2962 memset(&qc->sgent, 0, sizeof(qc->sgent));
2963 qc->__sg = &qc->sgent;
2964 qc->n_elem = 1;
2965 qc->orig_n_elem = 1;
2966 qc->buf_virt = buf;
2967
2968 sg = qc->__sg;
2969 sg_init_one(sg, buf, buflen);
2970 }
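/* Example (illustrative): a caller issuing an internal command with a
 * single bounce buffer, e.g. reading IDENTIFY data, sets up the data
 * phase with
 *
 *	ata_sg_init_one(qc, id_buf, ATA_ID_WORDS * sizeof(u16));
 *
 * after which qc->__sg points at the embedded qc->sgent entry and
 * qc->n_elem is 1.
 */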
2971
2972 /**
2973 * ata_sg_init - Associate command with scatter-gather table.
2974 * @qc: Command to be associated
2975 * @sg: Scatter-gather table.
2976 * @n_elem: Number of elements in s/g table.
2977 *
2978 * Initialize the data-related elements of queued_cmd @qc
2979 * to point to a scatter-gather table @sg, containing @n_elem
2980 * elements.
2981 *
2982 * LOCKING:
2983 * spin_lock_irqsave(host_set lock)
2984 */
2985
2986 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2987 unsigned int n_elem)
2988 {
2989 qc->flags |= ATA_QCFLAG_SG;
2990 qc->__sg = sg;
2991 qc->n_elem = n_elem;
2992 qc->orig_n_elem = n_elem;
2993 }
2994
2995 /**
2996 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2997 * @qc: Command with memory buffer to be mapped.
2998 *
2999 * DMA-map the memory buffer associated with queued_cmd @qc.
3000 *
3001 * LOCKING:
3002 * spin_lock_irqsave(host_set lock)
3003 *
3004 * RETURNS:
3005 * Zero on success, negative on error.
3006 */
3007
3008 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3009 {
3010 struct ata_port *ap = qc->ap;
3011 int dir = qc->dma_dir;
3012 struct scatterlist *sg = qc->__sg;
3013 dma_addr_t dma_address;
3014 int trim_sg = 0;
3015
3016 /* we must lengthen transfers to end on a 32-bit boundary */
3017 qc->pad_len = sg->length & 3;
3018 if (qc->pad_len) {
3019 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3020 struct scatterlist *psg = &qc->pad_sgent;
3021
3022 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3023
3024 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3025
3026 if (qc->tf.flags & ATA_TFLAG_WRITE)
3027 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3028 qc->pad_len);
3029
3030 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3031 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3032 /* trim sg */
3033 sg->length -= qc->pad_len;
3034 if (sg->length == 0)
3035 trim_sg = 1;
3036
3037 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3038 sg->length, qc->pad_len);
3039 }
3040
3041 if (trim_sg) {
3042 qc->n_elem--;
3043 goto skip_map;
3044 }
3045
3046 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3047 sg->length, dir);
3048 if (dma_mapping_error(dma_address)) {
3049 /* restore sg */
3050 sg->length += qc->pad_len;
3051 return -1;
3052 }
3053
3054 sg_dma_address(sg) = dma_address;
3055 sg_dma_len(sg) = sg->length;
3056
3057 skip_map:
3058 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3059 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3060
3061 return 0;
3062 }
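/* Example (illustrative): an ATAPI transfer of 13 bytes gives
 * qc->pad_len = 13 & 3 = 1; the sg entry is trimmed to 12 bytes and
 * the final byte travels via the 4-byte pad buffer, so the DMA engine
 * only ever sees 32-bit-aligned lengths (12 + 4 bytes here).
 */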
3063
3064 /**
3065 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3066 * @qc: Command with scatter-gather table to be mapped.
3067 *
3068 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3069 *
3070 * LOCKING:
3071 * spin_lock_irqsave(host_set lock)
3072 *
3073 * RETURNS:
3074 * Zero on success, negative on error.
3075 *
3076 */
3077
3078 static int ata_sg_setup(struct ata_queued_cmd *qc)
3079 {
3080 struct ata_port *ap = qc->ap;
3081 struct scatterlist *sg = qc->__sg;
3082 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3083 int n_elem, pre_n_elem, dir, trim_sg = 0;
3084
3085 VPRINTK("ENTER, ata%u\n", ap->id);
3086 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3087
3088 /* we must lengthen transfers to end on a 32-bit boundary */
3089 qc->pad_len = lsg->length & 3;
3090 if (qc->pad_len) {
3091 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3092 struct scatterlist *psg = &qc->pad_sgent;
3093 unsigned int offset;
3094
3095 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3096
3097 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3098
3099 /*
3100 * psg->page/offset are used to copy to-be-written
3101 * data in this function or read data in ata_sg_clean.
3102 */
3103 offset = lsg->offset + lsg->length - qc->pad_len;
3104 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3105 psg->offset = offset_in_page(offset);
3106
3107 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3108 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3109 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3110 kunmap_atomic(addr, KM_IRQ0);
3111 }
3112
3113 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3114 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3115 /* trim last sg */
3116 lsg->length -= qc->pad_len;
3117 if (lsg->length == 0)
3118 trim_sg = 1;
3119
3120 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3121 qc->n_elem - 1, lsg->length, qc->pad_len);
3122 }
3123
3124 pre_n_elem = qc->n_elem;
3125 if (trim_sg && pre_n_elem)
3126 pre_n_elem--;
3127
3128 if (!pre_n_elem) {
3129 n_elem = 0;
3130 goto skip_map;
3131 }
3132
3133 dir = qc->dma_dir;
3134 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3135 if (n_elem < 1) {
3136 /* restore last sg */
3137 lsg->length += qc->pad_len;
3138 return -1;
3139 }
3140
3141 DPRINTK("%d sg elements mapped\n", n_elem);
3142
3143 skip_map:
3144 qc->n_elem = n_elem;
3145
3146 return 0;
3147 }
3148
3149 /**
3150 * ata_poll_qc_complete - turn irq back on and finish qc
3151 * @qc: Command to complete
3152 *
3153 *
3154 * LOCKING:
3155 * None. (grabs host lock)
3156 */
3157
3158 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3159 {
3160 struct ata_port *ap = qc->ap;
3161 unsigned long flags;
3162
3163 spin_lock_irqsave(&ap->host_set->lock, flags);
3164 ap->flags &= ~ATA_FLAG_NOINTR;
3165 ata_irq_on(ap);
3166 ata_qc_complete(qc);
3167 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3168 }
3169
3170 /**
3171 * ata_pio_poll - poll using PIO, depending on current state
3172 * @ap: the target ata_port
3173 *
3174 * LOCKING:
3175 * None. (executing in kernel thread context)
3176 *
3177 * RETURNS:
3178 * timeout value to use
3179 */
3180
3181 static unsigned long ata_pio_poll(struct ata_port *ap)
3182 {
3183 struct ata_queued_cmd *qc;
3184 u8 status;
3185 unsigned int poll_state = HSM_ST_UNKNOWN;
3186 unsigned int reg_state = HSM_ST_UNKNOWN;
3187
3188 qc = ata_qc_from_tag(ap, ap->active_tag);
3189 WARN_ON(qc == NULL);
3190
3191 switch (ap->hsm_task_state) {
3192 case HSM_ST:
3193 case HSM_ST_POLL:
3194 poll_state = HSM_ST_POLL;
3195 reg_state = HSM_ST;
3196 break;
3197 case HSM_ST_LAST:
3198 case HSM_ST_LAST_POLL:
3199 poll_state = HSM_ST_LAST_POLL;
3200 reg_state = HSM_ST_LAST;
3201 break;
3202 default:
3203 BUG();
3204 break;
3205 }
3206
3207 status = ata_chk_status(ap);
3208 if (status & ATA_BUSY) {
3209 if (time_after(jiffies, ap->pio_task_timeout)) {
3210 qc->err_mask |= AC_ERR_TIMEOUT;
3211 ap->hsm_task_state = HSM_ST_TMOUT;
3212 return 0;
3213 }
3214 ap->hsm_task_state = poll_state;
3215 return ATA_SHORT_PAUSE;
3216 }
3217
3218 ap->hsm_task_state = reg_state;
3219 return 0;
3220 }
3221
3222 /**
3223 * ata_pio_complete - check if drive is busy or idle
3224 * @ap: the target ata_port
3225 *
3226 * LOCKING:
3227 * None. (executing in kernel thread context)
3228 *
3229 * RETURNS:
3230 * Non-zero if qc completed, zero otherwise.
3231 */
3232
3233 static int ata_pio_complete (struct ata_port *ap)
3234 {
3235 struct ata_queued_cmd *qc;
3236 u8 drv_stat;
3237
3238 /*
3239 * This is purely heuristic. This is a fast path. Sometimes when
3240 * we enter, BSY will be cleared in a chk-status or two. If not,
3241 * the drive is probably seeking or something. Snooze for a couple
3242 * msecs, then chk-status again. If still busy, fall back to
3243 * HSM_ST_POLL state.
3244 */
3245 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3246 if (drv_stat & ATA_BUSY) {
3247 msleep(2);
3248 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3249 if (drv_stat & ATA_BUSY) {
3250 ap->hsm_task_state = HSM_ST_LAST_POLL;
3251 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3252 return 0;
3253 }
3254 }
3255
3256 qc = ata_qc_from_tag(ap, ap->active_tag);
3257 WARN_ON(qc == NULL);
3258
3259 drv_stat = ata_wait_idle(ap);
3260 if (!ata_ok(drv_stat)) {
3261 qc->err_mask |= __ac_err_mask(drv_stat);
3262 ap->hsm_task_state = HSM_ST_ERR;
3263 return 0;
3264 }
3265
3266 ap->hsm_task_state = HSM_ST_IDLE;
3267
3268 WARN_ON(qc->err_mask);
3269 ata_poll_qc_complete(qc);
3270
3271 /* another command may start at this point */
3272
3273 return 1;
3274 }
3275
3276
3277 /**
3278 * swap_buf_le16 - swap halves of 16-bit words in place
3279 * @buf: Buffer to swap
3280 * @buf_words: Number of 16-bit words in buffer.
3281 *
3282 * Swap halves of 16-bit words if needed to convert from
3283 * little-endian byte order to native cpu byte order, or
3284 * vice-versa.
3285 *
3286 * LOCKING:
3287 * Inherited from caller.
3288 */
3289 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3290 {
3291 #ifdef __BIG_ENDIAN
3292 unsigned int i;
3293
3294 for (i = 0; i < buf_words; i++)
3295 buf[i] = le16_to_cpu(buf[i]);
3296 #endif /* __BIG_ENDIAN */
3297 }
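/* Example (illustrative): IDENTIFY data arrives from the device as
 * 256 little-endian words, so a caller converts it to CPU byte order
 * with
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * which compiles to a no-op on little-endian machines.
 */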
3298
3299 /**
3300 * ata_mmio_data_xfer - Transfer data by MMIO
3301 * @ap: port to read/write
3302 * @buf: data buffer
3303 * @buflen: buffer length
3304 * @write_data: read/write
3305 *
3306 * Transfer data from/to the device data register by MMIO.
3307 *
3308 * LOCKING:
3309 * Inherited from caller.
3310 */
3311
3312 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3313 unsigned int buflen, int write_data)
3314 {
3315 unsigned int i;
3316 unsigned int words = buflen >> 1;
3317 u16 *buf16 = (u16 *) buf;
3318 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3319
3320 /* Transfer multiple of 2 bytes */
3321 if (write_data) {
3322 for (i = 0; i < words; i++)
3323 writew(le16_to_cpu(buf16[i]), mmio);
3324 } else {
3325 for (i = 0; i < words; i++)
3326 buf16[i] = cpu_to_le16(readw(mmio));
3327 }
3328
3329 /* Transfer trailing 1 byte, if any. */
3330 if (unlikely(buflen & 0x01)) {
3331 u16 align_buf[1] = { 0 };
3332 unsigned char *trailing_buf = buf + buflen - 1;
3333
3334 if (write_data) {
3335 memcpy(align_buf, trailing_buf, 1);
3336 writew(le16_to_cpu(align_buf[0]), mmio);
3337 } else {
3338 align_buf[0] = cpu_to_le16(readw(mmio));
3339 memcpy(trailing_buf, align_buf, 1);
3340 }
3341 }
3342 }
3343
3344 /**
3345 * ata_pio_data_xfer - Transfer data by PIO
3346 * @ap: port to read/write
3347 * @buf: data buffer
3348 * @buflen: buffer length
3349 * @write_data: read/write
3350 *
3351 * Transfer data from/to the device data register by PIO.
3352 *
3353 * LOCKING:
3354 * Inherited from caller.
3355 */
3356
3357 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3358 unsigned int buflen, int write_data)
3359 {
3360 unsigned int words = buflen >> 1;
3361
3362 /* Transfer multiple of 2 bytes */
3363 if (write_data)
3364 outsw(ap->ioaddr.data_addr, buf, words);
3365 else
3366 insw(ap->ioaddr.data_addr, buf, words);
3367
3368 /* Transfer trailing 1 byte, if any. */
3369 if (unlikely(buflen & 0x01)) {
3370 u16 align_buf[1] = { 0 };
3371 unsigned char *trailing_buf = buf + buflen - 1;
3372
3373 if (write_data) {
3374 memcpy(align_buf, trailing_buf, 1);
3375 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3376 } else {
3377 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3378 memcpy(trailing_buf, align_buf, 1);
3379 }
3380 }
3381 }
3382
3383 /**
3384 * ata_data_xfer - Transfer data from/to the data register.
3385 * @ap: port to read/write
3386 * @buf: data buffer
3387 * @buflen: buffer length
3388 * @do_write: read/write
3389 *
3390 * Transfer data from/to the device data register.
3391 *
3392 * LOCKING:
3393 * Inherited from caller.
3394 */
3395
3396 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3397 unsigned int buflen, int do_write)
3398 {
3399 /* Make the crap hardware pay the costs not the good stuff */
3400 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3401 unsigned long flags;
3402 local_irq_save(flags);
3403 if (ap->flags & ATA_FLAG_MMIO)
3404 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3405 else
3406 ata_pio_data_xfer(ap, buf, buflen, do_write);
3407 local_irq_restore(flags);
3408 } else {
3409 if (ap->flags & ATA_FLAG_MMIO)
3410 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3411 else
3412 ata_pio_data_xfer(ap, buf, buflen, do_write);
3413 }
3414 }
3415
3416 /**
3417 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3418 * @qc: Command on going
3419 *
3420 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3421 *
3422 * LOCKING:
3423 * Inherited from caller.
3424 */
3425
3426 static void ata_pio_sector(struct ata_queued_cmd *qc)
3427 {
3428 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3429 struct scatterlist *sg = qc->__sg;
3430 struct ata_port *ap = qc->ap;
3431 struct page *page;
3432 unsigned int offset;
3433 unsigned char *buf;
3434
3435 if (qc->cursect == (qc->nsect - 1))
3436 ap->hsm_task_state = HSM_ST_LAST;
3437
3438 page = sg[qc->cursg].page;
3439 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3440
3441 /* get the current page and offset */
3442 page = nth_page(page, (offset >> PAGE_SHIFT));
3443 offset %= PAGE_SIZE;
3444
3445 buf = kmap(page) + offset;
3446
3447 qc->cursect++;
3448 qc->cursg_ofs++;
3449
3450 if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
3451 qc->cursg++;
3452 qc->cursg_ofs = 0;
3453 }
3454
3455 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3456
3457 /* do the actual data transfer */
3458 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3459 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3460
3461 kunmap(page);
3462 }
3463
3464 /**
3465 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3466 * @qc: Command on going
3467 * @bytes: number of bytes
3468 *
3469 * Transfer data from/to the ATAPI device.
3470 *
3471 * LOCKING:
3472 * Inherited from caller.
3473 *
3474 */
3475
3476 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3477 {
3478 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3479 struct scatterlist *sg = qc->__sg;
3480 struct ata_port *ap = qc->ap;
3481 struct page *page;
3482 unsigned char *buf;
3483 unsigned int offset, count;
3484
3485 if (qc->curbytes + bytes >= qc->nbytes)
3486 ap->hsm_task_state = HSM_ST_LAST;
3487
3488 next_sg:
3489 if (unlikely(qc->cursg >= qc->n_elem)) {
3490 /*
3491 * The end of qc->sg is reached and the device expects
3492 * more data to transfer. In order not to overrun qc->sg
3493 * while still fulfilling the length in the byte count register:
3494 * - for the read case, discard trailing data from the device
3495 * - for the write case, pad with zero data to the device
3496 */
3497 u16 pad_buf[1] = { 0 };
3498 unsigned int words = bytes >> 1;
3499 unsigned int i;
3500
3501 if (words) /* warning if bytes > 1 */
3502 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3503 ap->id, bytes);
3504
3505 for (i = 0; i < words; i++)
3506 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3507
3508 ap->hsm_task_state = HSM_ST_LAST;
3509 return;
3510 }
3511
3512 sg = &qc->__sg[qc->cursg];
3513
3514 page = sg->page;
3515 offset = sg->offset + qc->cursg_ofs;
3516
3517 /* get the current page and offset */
3518 page = nth_page(page, (offset >> PAGE_SHIFT));
3519 offset %= PAGE_SIZE;
3520
3521 /* don't overrun current sg */
3522 count = min(sg->length - qc->cursg_ofs, bytes);
3523
3524 /* don't cross page boundaries */
3525 count = min(count, (unsigned int)PAGE_SIZE - offset);
3526
3527 buf = kmap(page) + offset;
3528
3529 bytes -= count;
3530 qc->curbytes += count;
3531 qc->cursg_ofs += count;
3532
3533 if (qc->cursg_ofs == sg->length) {
3534 qc->cursg++;
3535 qc->cursg_ofs = 0;
3536 }
3537
3538 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3539
3540 /* do the actual data transfer */
3541 ata_data_xfer(ap, buf, count, do_write);
3542
3543 kunmap(page);
3544
3545 if (bytes)
3546 goto next_sg;
3547 }
3548
3549 /**
3550 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3551 * @qc: Command on going
3552 *
3553 * Transfer data from/to the ATAPI device.
3554 *
3555 * LOCKING:
3556 * Inherited from caller.
3557 */
3558
3559 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3560 {
3561 struct ata_port *ap = qc->ap;
3562 struct ata_device *dev = qc->dev;
3563 unsigned int ireason, bc_lo, bc_hi, bytes;
3564 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3565
3566 ap->ops->tf_read(ap, &qc->tf);
3567 ireason = qc->tf.nsect;
3568 bc_lo = qc->tf.lbam;
3569 bc_hi = qc->tf.lbah;
3570 bytes = (bc_hi << 8) | bc_lo;
3571
3572 /* shall be cleared to zero, indicating xfer of data */
3573 if (ireason & (1 << 0))
3574 goto err_out;
3575
3576 /* make sure transfer direction matches expected */
3577 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3578 if (do_write != i_write)
3579 goto err_out;
3580
3581 __atapi_pio_bytes(qc, bytes);
3582
3583 return;
3584
3585 err_out:
3586 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3587 ap->id, dev->devno);
3588 qc->err_mask |= AC_ERR_HSM;
3589 ap->hsm_task_state = HSM_ST_ERR;
3590 }
3591
3592 /**
3593 * ata_pio_block - start PIO on a block
3594 * @ap: the target ata_port
3595 *
3596 * LOCKING:
3597 * None. (executing in kernel thread context)
3598 */
3599
3600 static void ata_pio_block(struct ata_port *ap)
3601 {
3602 struct ata_queued_cmd *qc;
3603 u8 status;
3604
3605 /*
3606 * This is purely heuristic. This is a fast path.
3607 * Sometimes when we enter, BSY will be cleared in
3608 * a chk-status or two. If not, the drive is probably seeking
3609 * or something. Snooze for a couple msecs, then
3610 * chk-status again. If still busy, fall back to
3611 * HSM_ST_POLL state.
3612 */
3613 status = ata_busy_wait(ap, ATA_BUSY, 5);
3614 if (status & ATA_BUSY) {
3615 msleep(2);
3616 status = ata_busy_wait(ap, ATA_BUSY, 10);
3617 if (status & ATA_BUSY) {
3618 ap->hsm_task_state = HSM_ST_POLL;
3619 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3620 return;
3621 }
3622 }
3623
3624 qc = ata_qc_from_tag(ap, ap->active_tag);
3625 WARN_ON(qc == NULL);
3626
3627 /* check error */
3628 if (status & (ATA_ERR | ATA_DF)) {
3629 qc->err_mask |= AC_ERR_DEV;
3630 ap->hsm_task_state = HSM_ST_ERR;
3631 return;
3632 }
3633
3634 /* transfer data if any */
3635 if (is_atapi_taskfile(&qc->tf)) {
3636 /* DRQ=0 means no more data to transfer */
3637 if ((status & ATA_DRQ) == 0) {
3638 ap->hsm_task_state = HSM_ST_LAST;
3639 return;
3640 }
3641
3642 atapi_pio_bytes(qc);
3643 } else {
3644 /* handle BSY=0, DRQ=0 as error */
3645 if ((status & ATA_DRQ) == 0) {
3646 qc->err_mask |= AC_ERR_HSM;
3647 ap->hsm_task_state = HSM_ST_ERR;
3648 return;
3649 }
3650
3651 ata_pio_sector(qc);
3652 }
3653 }
3654
3655 static void ata_pio_error(struct ata_port *ap)
3656 {
3657 struct ata_queued_cmd *qc;
3658
3659 qc = ata_qc_from_tag(ap, ap->active_tag);
3660 WARN_ON(qc == NULL);
3661
3662 if (qc->tf.command != ATA_CMD_PACKET)
3663 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3664
3665 /* make sure qc->err_mask is available to
3666 * know what's wrong and recover
3667 */
3668 WARN_ON(qc->err_mask == 0);
3669
3670 ap->hsm_task_state = HSM_ST_IDLE;
3671
3672 ata_poll_qc_complete(qc);
3673 }
3674
3675 static void ata_pio_task(void *_data)
3676 {
3677 struct ata_port *ap = _data;
3678 unsigned long timeout;
3679 int qc_completed;
3680
3681 fsm_start:
3682 timeout = 0;
3683 qc_completed = 0;
3684
3685 switch (ap->hsm_task_state) {
3686 case HSM_ST_IDLE:
3687 return;
3688
3689 case HSM_ST:
3690 ata_pio_block(ap);
3691 break;
3692
3693 case HSM_ST_LAST:
3694 qc_completed = ata_pio_complete(ap);
3695 break;
3696
3697 case HSM_ST_POLL:
3698 case HSM_ST_LAST_POLL:
3699 timeout = ata_pio_poll(ap);
3700 break;
3701
3702 case HSM_ST_TMOUT:
3703 case HSM_ST_ERR:
3704 ata_pio_error(ap);
3705 return;
3706 }
3707
3708 if (timeout)
3709 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3710 else if (!qc_completed)
3711 goto fsm_start;
3712 }
3713
3714 /**
3715 * atapi_packet_task - Write CDB bytes to hardware
3716 * @_data: Port to which ATAPI device is attached.
3717 *
3718 * When the device has indicated its readiness to accept
3719 * a CDB, this function is called. Send the CDB.
3720 * If DMA is to be performed, exit immediately.
3721 * Otherwise, we are in polling mode, so poll
3722 * status until the operation succeeds or fails.
3723 *
3724 * LOCKING:
3725 * Kernel thread context (may sleep)
3726 */
3727
3728 static void atapi_packet_task(void *_data)
3729 {
3730 struct ata_port *ap = _data;
3731 struct ata_queued_cmd *qc;
3732 u8 status;
3733
3734 qc = ata_qc_from_tag(ap, ap->active_tag);
3735 WARN_ON(qc == NULL);
3736 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3737
3738 /* sleep-wait for BSY to clear */
3739 DPRINTK("busy wait\n");
3740 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3741 qc->err_mask |= AC_ERR_TIMEOUT;
3742 goto err_out;
3743 }
3744
3745 /* make sure DRQ is set */
3746 status = ata_chk_status(ap);
3747 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3748 qc->err_mask |= AC_ERR_HSM;
3749 goto err_out;
3750 }
3751
3752 /* send SCSI cdb */
3753 DPRINTK("send cdb\n");
3754 WARN_ON(qc->dev->cdb_len < 12);
3755
3756 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3757 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3758 unsigned long flags;
3759
3760 /* Once we're done issuing the command and kicking bmdma,
3761 * the irq handler takes over. To avoid losing the irq, we
3762 * need to clear the NOINTR flag before sending the cdb, but
3763 * the interrupt handler shouldn't be invoked before we're
3764 * finished. Hence, the following locking.
3765 */
3766 spin_lock_irqsave(&ap->host_set->lock, flags);
3767 ap->flags &= ~ATA_FLAG_NOINTR;
3768 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3769 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3770 ap->ops->bmdma_start(qc); /* initiate bmdma */
3771 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3772 } else {
3773 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3774
3775 /* PIO commands are handled by polling */
3776 ap->hsm_task_state = HSM_ST;
3777 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3778 }
3779
3780 return;
3781
3782 err_out:
3783 ata_poll_qc_complete(qc);
3784 }
3785
3786 /**
3787 * ata_qc_timeout - Handle timeout of queued command
3788 * @qc: Command that timed out
3789 *
3790 * Some part of the kernel (currently, only the SCSI layer)
3791 * has noticed that the active command on the port has not
3792 * completed after a specified length of time. Handle this
3793 * condition by disabling DMA (if necessary) and completing
3794 * transactions, with error if necessary.
3795 *
3796 * This also handles the case of the "lost interrupt", where
3797 * for some reason (possibly hardware bug, possibly driver bug)
3798 * an interrupt was not delivered to the driver, even though the
3799 * transaction completed successfully.
3800 *
3801 * LOCKING:
3802 * Inherited from SCSI layer (none, can sleep)
3803 */
3804
3805 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3806 {
3807 struct ata_port *ap = qc->ap;
3808 struct ata_host_set *host_set = ap->host_set;
3809 u8 host_stat = 0, drv_stat;
3810 unsigned long flags;
3811
3812 DPRINTK("ENTER\n");
3813
3814 ap->hsm_task_state = HSM_ST_IDLE;
3815
3816 spin_lock_irqsave(&host_set->lock, flags);
3817
3818 switch (qc->tf.protocol) {
3819
3820 case ATA_PROT_DMA:
3821 case ATA_PROT_ATAPI_DMA:
3822 host_stat = ap->ops->bmdma_status(ap);
3823
3824 /* before we do anything else, clear DMA-Start bit */
3825 ap->ops->bmdma_stop(qc);
3826
3827 /* fall through */
3828
3829 default:
3830 ata_altstatus(ap);
3831 drv_stat = ata_chk_status(ap);
3832
3833 /* ack bmdma irq events */
3834 ap->ops->irq_clear(ap);
3835
3836 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3837 ap->id, qc->tf.command, drv_stat, host_stat);
3838
3839 /* complete taskfile transaction */
3840 qc->err_mask |= ac_err_mask(drv_stat);
3841 break;
3842 }
3843
3844 spin_unlock_irqrestore(&host_set->lock, flags);
3845
3846 ata_eh_qc_complete(qc);
3847
3848 DPRINTK("EXIT\n");
3849 }
3850
3851 /**
3852 * ata_eng_timeout - Handle timeout of queued command
3853 * @ap: Port on which timed-out command is active
3854 *
3855 * Some part of the kernel (currently, only the SCSI layer)
3856 * has noticed that the active command on port @ap has not
3857 * completed after a specified length of time. Handle this
3858 * condition by disabling DMA (if necessary) and completing
3859 * transactions, with error if necessary.
3860 *
3861 * This also handles the case of the "lost interrupt", where
3862 * for some reason (possibly hardware bug, possibly driver bug)
3863 * an interrupt was not delivered to the driver, even though the
3864 * transaction completed successfully.
3865 *
3866 * LOCKING:
3867 * Inherited from SCSI layer (none, can sleep)
3868 */
3869
3870 void ata_eng_timeout(struct ata_port *ap)
3871 {
3872 DPRINTK("ENTER\n");
3873
3874 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3875
3876 DPRINTK("EXIT\n");
3877 }
3878
3879 /**
3880 * ata_qc_new - Request an available ATA command, for queueing
3881 * @ap: Port from which to request an available command structure
3882 *
3883 *
3884 * LOCKING:
3885 * None.
3886 */
3887
3888 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3889 {
3890 struct ata_queued_cmd *qc = NULL;
3891 unsigned int i;
3892
3893 for (i = 0; i < ATA_MAX_QUEUE; i++)
3894 if (!test_and_set_bit(i, &ap->qactive)) {
3895 qc = ata_qc_from_tag(ap, i);
3896 break;
3897 }
3898
3899 if (qc)
3900 qc->tag = i;
3901
3902 return qc;
3903 }
3904
3905 /**
3906 * ata_qc_new_init - Request an available ATA command, and initialize it
3907 * @ap: Port associated with device @dev
3908 * @dev: Device for which the command structure is initialized
3909 *
3910 * LOCKING:
3911 * None.
3912 */
3913
3914 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3915 struct ata_device *dev)
3916 {
3917 struct ata_queued_cmd *qc;
3918
3919 qc = ata_qc_new(ap);
3920 if (qc) {
3921 qc->scsicmd = NULL;
3922 qc->ap = ap;
3923 qc->dev = dev;
3924
3925 ata_qc_reinit(qc);
3926 }
3927
3928 return qc;
3929 }
3930
3931 /**
3932 * ata_qc_free - free unused ata_queued_cmd
3933 * @qc: Command to complete
3934 *
3935 * Designed to free unused ata_queued_cmd object
3936 * in case something prevents using it.
3937 *
3938 * LOCKING:
3939 * spin_lock_irqsave(host_set lock)
3940 */
3941 void ata_qc_free(struct ata_queued_cmd *qc)
3942 {
3943 struct ata_port *ap = qc->ap;
3944 unsigned int tag;
3945
3946 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3947
3948 qc->flags = 0;
3949 tag = qc->tag;
3950 if (likely(ata_tag_valid(tag))) {
3951 if (tag == ap->active_tag)
3952 ap->active_tag = ATA_TAG_POISON;
3953 qc->tag = ATA_TAG_POISON;
3954 clear_bit(tag, &ap->qactive);
3955 }
3956 }
3957
3958 void __ata_qc_complete(struct ata_queued_cmd *qc)
3959 {
3960 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3961 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3962
3963 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3964 ata_sg_clean(qc);
3965
3966 /* atapi: mark qc as inactive to prevent the interrupt handler
3967 * from completing the command twice later, before the error handler
3968 * is called. (when rc != 0 and atapi request sense is needed)
3969 */
3970 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3971
3972 /* call completion callback */
3973 qc->complete_fn(qc);
3974 }
3975
3976 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3977 {
3978 struct ata_port *ap = qc->ap;
3979
3980 switch (qc->tf.protocol) {
3981 case ATA_PROT_DMA:
3982 case ATA_PROT_ATAPI_DMA:
3983 return 1;
3984
3985 case ATA_PROT_ATAPI:
3986 case ATA_PROT_PIO:
3987 if (ap->flags & ATA_FLAG_PIO_DMA)
3988 return 1;
3989
3990 /* fall through */
3991
3992 default:
3993 return 0;
3994 }
3995
3996 /* never reached */
3997 }
3998
3999 /**
4000 * ata_qc_issue - issue taskfile to device
4001 * @qc: command to issue to device
4002 *
4003 * Prepare an ATA command for submission to the device.
4004 * This includes mapping the data into a DMA-able
4005 * area, filling in the S/G table, and finally
4006 * writing the taskfile to hardware, starting the command.
4007 *
4008 * LOCKING:
4009 * spin_lock_irqsave(host_set lock)
4010 */
4011 void ata_qc_issue(struct ata_queued_cmd *qc)
4012 {
4013 struct ata_port *ap = qc->ap;
4014
4015 ap->active_tag = qc->tag;
4016 qc->flags |= ATA_QCFLAG_ACTIVE;
4017
4018 if (ata_should_dma_map(qc)) {
4019 if (qc->flags & ATA_QCFLAG_SG) {
4020 if (ata_sg_setup(qc))
4021 goto sg_err;
4022 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4023 if (ata_sg_setup_one(qc))
4024 goto sg_err;
4025 }
4026 } else {
4027 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4028 }
4029
4030 ap->ops->qc_prep(qc);
4031
4032 qc->err_mask |= ap->ops->qc_issue(qc);
4033 if (unlikely(qc->err_mask))
4034 goto err;
4035 return;
4036
4037 sg_err:
4038 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4039 qc->err_mask |= AC_ERR_SYSTEM;
4040 err:
4041 ata_qc_complete(qc);
4042 }
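/* Example (illustrative): the typical life cycle of a command as seen
 * from this file:
 *
 *	qc = ata_qc_new_init(ap, dev);	allocate and initialize
 *	(fill in qc->tf, flags and data buffers)
 *	ata_qc_issue(qc);		map DMA, prep and issue
 *
 * On completion, __ata_qc_complete() unmaps DMA, clears
 * ATA_QCFLAG_ACTIVE and invokes qc->complete_fn.
 */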
4043
4044 /**
4045 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4046 * @qc: command to issue to device
4047 *
4048 * Using various libata functions and hooks, this function
4049 * starts an ATA command. ATA commands are grouped into
4050 * classes called "protocols", and issuing each type of protocol
4051 * is slightly different.
4052 *
4053 * May be used as the qc_issue() entry in ata_port_operations.
4054 *
4055 * LOCKING:
4056 * spin_lock_irqsave(host_set lock)
4057 *
4058 * RETURNS:
4059 * Zero on success, AC_ERR_* mask on failure
4060 */
4061
4062 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4063 {
4064 struct ata_port *ap = qc->ap;
4065
4066 ata_dev_select(ap, qc->dev->devno, 1, 0);
4067
4068 switch (qc->tf.protocol) {
4069 case ATA_PROT_NODATA:
4070 ata_tf_to_host(ap, &qc->tf);
4071 break;
4072
4073 case ATA_PROT_DMA:
4074 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4075 ap->ops->bmdma_setup(qc); /* set up bmdma */
4076 ap->ops->bmdma_start(qc); /* initiate bmdma */
4077 break;
4078
4079 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4080 ata_qc_set_polling(qc);
4081 ata_tf_to_host(ap, &qc->tf);
4082 ap->hsm_task_state = HSM_ST;
4083 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4084 break;
4085
4086 case ATA_PROT_ATAPI:
4087 ata_qc_set_polling(qc);
4088 ata_tf_to_host(ap, &qc->tf);
4089 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4090 break;
4091
4092 case ATA_PROT_ATAPI_NODATA:
4093 ap->flags |= ATA_FLAG_NOINTR;
4094 ata_tf_to_host(ap, &qc->tf);
4095 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4096 break;
4097
4098 case ATA_PROT_ATAPI_DMA:
4099 ap->flags |= ATA_FLAG_NOINTR;
4100 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4101 ap->ops->bmdma_setup(qc); /* set up bmdma */
4102 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4103 break;
4104
4105 default:
4106 WARN_ON(1);
4107 return AC_ERR_SYSTEM;
4108 }
4109
4110 return 0;
4111 }
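
/*
 * Illustrative sketch: a low-level driver with no protocol quirks can
 * plug this helper straight into its port operations, as the comment
 * above suggests; "my_port_ops" and the elided entries are
 * hypothetical placeholders.
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		...
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		...
 *	};
 */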
4112
4113 /**
4114 * ata_host_intr - Handle host interrupt for given (port, task)
4115 * @ap: Port on which interrupt arrived (possibly...)
4116 * @qc: Taskfile currently active in engine
4117 *
4118 * Handle host interrupt for given queued command. Currently,
4119 * only DMA interrupts are handled. All other commands are
4120 * handled via polling with interrupts disabled (nIEN bit).
4121 *
4122 * LOCKING:
4123 * spin_lock_irqsave(host_set lock)
4124 *
4125 * RETURNS:
4126 * One if interrupt was handled, zero if not (shared irq).
4127 */
4128
4129 inline unsigned int ata_host_intr (struct ata_port *ap,
4130 struct ata_queued_cmd *qc)
4131 {
4132 u8 status, host_stat;
4133
4134 switch (qc->tf.protocol) {
4135
4136 case ATA_PROT_DMA:
4137 case ATA_PROT_ATAPI_DMA:
4138 case ATA_PROT_ATAPI:
4139 /* check status of DMA engine */
4140 host_stat = ap->ops->bmdma_status(ap);
4141 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4142
4143 /* if it's not our irq... */
4144 if (!(host_stat & ATA_DMA_INTR))
4145 goto idle_irq;
4146
4147 /* before we do anything else, clear DMA-Start bit */
4148 ap->ops->bmdma_stop(qc);
4149
4150 /* fall through */
4151
4152 case ATA_PROT_ATAPI_NODATA:
4153 case ATA_PROT_NODATA:
4154 /* check altstatus */
4155 status = ata_altstatus(ap);
4156 if (status & ATA_BUSY)
4157 goto idle_irq;
4158
4159 /* check main status, clearing INTRQ */
4160 status = ata_chk_status(ap);
4161 if (unlikely(status & ATA_BUSY))
4162 goto idle_irq;
4163 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4164 ap->id, qc->tf.protocol, status);
4165
4166 /* ack bmdma irq events */
4167 ap->ops->irq_clear(ap);
4168
4169 /* complete taskfile transaction */
4170 qc->err_mask |= ac_err_mask(status);
4171 ata_qc_complete(qc);
4172 break;
4173
4174 default:
4175 goto idle_irq;
4176 }
4177
4178 return 1; /* irq handled */
4179
4180 idle_irq:
4181 ap->stats.idle_irq++;
4182
4183 #ifdef ATA_IRQ_TRAP
4184 if ((ap->stats.idle_irq % 1000) == 0) {
4185 ata_irq_ack(ap, 0); /* debug trap */
4186 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4187 return 1;
4188 }
4189 #endif
4190 return 0; /* irq not handled */
4191 }
4192
4193 /**
4194 * ata_interrupt - Default ATA host interrupt handler
4195 * @irq: irq line (unused)
4196 * @dev_instance: pointer to our ata_host_set information structure
4197 * @regs: unused
4198 *
4199 * Default interrupt handler for PCI IDE devices. Calls
4200 * ata_host_intr() for each port that is not disabled.
4201 *
4202 * LOCKING:
4203 * Obtains host_set lock during operation.
4204 *
4205 * RETURNS:
4206 * IRQ_NONE or IRQ_HANDLED.
4207 */
4208
4209 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4210 {
4211 struct ata_host_set *host_set = dev_instance;
4212 unsigned int i;
4213 unsigned int handled = 0;
4214 unsigned long flags;
4215
4216 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4217 spin_lock_irqsave(&host_set->lock, flags);
4218
4219 for (i = 0; i < host_set->n_ports; i++) {
4220 struct ata_port *ap;
4221
4222 ap = host_set->ports[i];
4223 if (ap &&
4224 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4225 struct ata_queued_cmd *qc;
4226
4227 qc = ata_qc_from_tag(ap, ap->active_tag);
4228 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4229 (qc->flags & ATA_QCFLAG_ACTIVE))
4230 handled |= ata_host_intr(ap, qc);
4231 }
4232 }
4233
4234 spin_unlock_irqrestore(&host_set->lock, flags);
4235
4236 return IRQ_RETVAL(handled);
4237 }
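
/*
 * Illustrative sketch: drivers that rely on this default handler
 * reference it from their port operations, typically paired with the
 * bmdma irq_clear helper; ata_device_add() below then passes it to
 * request_irq().  "my_port_ops" is a hypothetical name.
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		...
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		...
 *	};
 */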
4238
4239
4240 /*
4241 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4242 * without filling in any other taskfile registers.
4243 */
4244 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4245 u8 cmd)
4246 {
4247 struct ata_taskfile tf;
4248 int err;
4249
4250 ata_tf_init(ap, &tf, dev->devno);
4251
4252 tf.command = cmd;
4253 tf.flags |= ATA_TFLAG_DEVICE;
4254 tf.protocol = ATA_PROT_NODATA;
4255
4256 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4257 if (err)
4258 printk(KERN_ERR "%s: ata command failed: %d\n",
4259 __FUNCTION__, err);
4260
4261 return err;
4262 }
4263
4264 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4265 {
4266 u8 cmd;
4267
4268 if (!ata_try_flush_cache(dev))
4269 return 0;
4270
4271 if (ata_id_has_flush_ext(dev->id))
4272 cmd = ATA_CMD_FLUSH_EXT;
4273 else
4274 cmd = ATA_CMD_FLUSH;
4275
4276 return ata_do_simple_cmd(ap, dev, cmd);
4277 }
4278
4279 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4280 {
4281 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4282 }
4283
4284 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4285 {
4286 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4287 }
4288
4289 /**
4290 * ata_device_resume - wake up a previously suspended device
4291 * @ap: port the device is connected to
4292 * @dev: the device to resume
4293 *
4294 * Kick the drive back into action by sending it an idle immediate
4295 * command and making sure the transfer mode agrees between drive
4296 * and host.
4297 *
4298 */
4299 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4300 {
4301 if (ap->flags & ATA_FLAG_SUSPENDED) {
4302 ap->flags &= ~ATA_FLAG_SUSPENDED;
4303 ata_set_mode(ap);
4304 }
4305 if (!ata_dev_enabled(dev))
4306 return 0;
4307 if (dev->class == ATA_DEV_ATA)
4308 ata_start_drive(ap, dev);
4309
4310 return 0;
4311 }
4312
4313 /**
4314 * ata_device_suspend - prepare a device for suspend
4315 * @ap: port the device is connected to
4316 * @dev: the device to suspend
4317 *
4318 * Flush the cache on the drive, if appropriate, then issue a
4319 * standbynow command.
4320 */
4321 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4322 {
4323 if (!ata_dev_enabled(dev))
4324 return 0;
4325 if (dev->class == ATA_DEV_ATA)
4326 ata_flush_cache(ap, dev);
4327
4328 if (state.event != PM_EVENT_FREEZE)
4329 ata_standby_drive(ap, dev);
4330 ap->flags |= ATA_FLAG_SUSPENDED;
4331 return 0;
4332 }
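
/*
 * Illustrative sketch (hypothetical caller): a suspend/resume pair
 * built on the two helpers above would look like
 *
 *	ata_device_suspend(ap, dev, state);	(entering suspend)
 *	...
 *	ata_device_resume(ap, dev);		(leaving suspend)
 *
 * For anything other than PM_EVENT_FREEZE, the suspend path also
 * spins the drive down via STANDBY IMMEDIATE.
 */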
4333
4334 /**
4335 * ata_port_start - Set port up for dma.
4336 * @ap: Port to initialize
4337 *
4338 * Called just after data structures for each port are
4339 * initialized. Allocates space for PRD table.
4340 *
4341 * May be used as the port_start() entry in ata_port_operations.
4342 *
4343 * LOCKING:
4344 * Inherited from caller.
4345 */
4346
4347 int ata_port_start (struct ata_port *ap)
4348 {
4349 struct device *dev = ap->dev;
4350 int rc;
4351
4352 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4353 if (!ap->prd)
4354 return -ENOMEM;
4355
4356 rc = ata_pad_alloc(ap, dev);
4357 if (rc) {
4358 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4359 return rc;
4360 }
4361
4362 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4363
4364 return 0;
4365 }
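
/*
 * Illustrative sketch: a driver with per-port private state can layer
 * its own port_start on top of this helper.  "my_port_start" and
 * "struct my_port_priv" are hypothetical.
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		struct my_port_priv *pp;
 *		int rc = ata_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		pp = kzalloc(sizeof(*pp), GFP_KERNEL);
 *		if (!pp) {
 *			ata_port_stop(ap);
 *			return -ENOMEM;
 *		}
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */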
4366
4367
4368 /**
4369 * ata_port_stop - Undo ata_port_start()
4370 * @ap: Port to shut down
4371 *
4372 * Frees the PRD table.
4373 *
4374 * May be used as the port_stop() entry in ata_port_operations.
4375 *
4376 * LOCKING:
4377 * Inherited from caller.
4378 */
4379
4380 void ata_port_stop (struct ata_port *ap)
4381 {
4382 struct device *dev = ap->dev;
4383
4384 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4385 ata_pad_free(ap, dev);
4386 }
4387
4388 void ata_host_stop (struct ata_host_set *host_set)
4389 {
4390 if (host_set->mmio_base)
4391 iounmap(host_set->mmio_base);
4392 }
4393
4394
4395 /**
4396 * ata_host_remove - Unregister SCSI host structure with upper layers
4397 * @ap: Port to unregister
4398 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4399 *
4400 * LOCKING:
4401 * Inherited from caller.
4402 */
4403
4404 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4405 {
4406 struct Scsi_Host *sh = ap->host;
4407
4408 DPRINTK("ENTER\n");
4409
4410 if (do_unregister)
4411 scsi_remove_host(sh);
4412
4413 ap->ops->port_stop(ap);
4414 }
4415
4416 /**
4417 * ata_host_init - Initialize an ata_port structure
4418 * @ap: Structure to initialize
4419 * @host: associated SCSI mid-layer structure
4420 * @host_set: Collection of hosts to which @ap belongs
4421 * @ent: Probe information provided by low-level driver
4422 * @port_no: Port number associated with this ata_port
4423 *
4424 * Initialize a new ata_port structure, and its associated
4425 * scsi_host.
4426 *
4427 * LOCKING:
4428 * Inherited from caller.
4429 */
4430
4431 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4432 struct ata_host_set *host_set,
4433 const struct ata_probe_ent *ent, unsigned int port_no)
4434 {
4435 unsigned int i;
4436
4437 host->max_id = 16;
4438 host->max_lun = 1;
4439 host->max_channel = 1;
4440 host->unique_id = ata_unique_id++;
4441 host->max_cmd_len = 12;
4442
4443 ap->flags = ATA_FLAG_PORT_DISABLED;
4444 ap->id = host->unique_id;
4445 ap->host = host;
4446 ap->ctl = ATA_DEVCTL_OBS;
4447 ap->host_set = host_set;
4448 ap->dev = ent->dev;
4449 ap->port_no = port_no;
4450 ap->hard_port_no =
4451 ent->legacy_mode ? ent->hard_port_no : port_no;
4452 ap->pio_mask = ent->pio_mask;
4453 ap->mwdma_mask = ent->mwdma_mask;
4454 ap->udma_mask = ent->udma_mask;
4455 ap->flags |= ent->host_flags;
4456 ap->ops = ent->port_ops;
4457 ap->cbl = ATA_CBL_NONE;
4458 ap->active_tag = ATA_TAG_POISON;
4459 ap->last_ctl = 0xFF;
4460
4461 INIT_WORK(&ap->port_task, NULL, NULL);
4462 INIT_LIST_HEAD(&ap->eh_done_q);
4463
4464 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4465 struct ata_device *dev = &ap->device[i];
4466 dev->devno = i;
4467 dev->pio_mask = UINT_MAX;
4468 dev->mwdma_mask = UINT_MAX;
4469 dev->udma_mask = UINT_MAX;
4470 }
4471
4472 #ifdef ATA_IRQ_TRAP
4473 ap->stats.unhandled_irq = 1;
4474 ap->stats.idle_irq = 1;
4475 #endif
4476
4477 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4478 }
4479
4480 /**
4481 * ata_host_add - Attach low-level ATA driver to system
4482 * @ent: Information provided by low-level driver
4483 * @host_set: Collection of ports to which we add this port
4484 * @port_no: Port number associated with this host
4485 *
4486 * Attach low-level ATA driver to system.
4487 *
4488 * LOCKING:
4489 * PCI/etc. bus probe sem.
4490 *
4491 * RETURNS:
4492 * New ata_port on success, NULL on error.
4493 */
4494
4495 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4496 struct ata_host_set *host_set,
4497 unsigned int port_no)
4498 {
4499 struct Scsi_Host *host;
4500 struct ata_port *ap;
4501 int rc;
4502
4503 DPRINTK("ENTER\n");
4504
4505 if (!ent->port_ops->probe_reset &&
4506 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4507 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4508 port_no);
4509 return NULL;
4510 }
4511
4512 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4513 if (!host)
4514 return NULL;
4515
4516 host->transportt = &ata_scsi_transport_template;
4517
4518 ap = (struct ata_port *) &host->hostdata[0];
4519
4520 ata_host_init(ap, host, host_set, ent, port_no);
4521
4522 rc = ap->ops->port_start(ap);
4523 if (rc)
4524 goto err_out;
4525
4526 return ap;
4527
4528 err_out:
4529 scsi_host_put(host);
4530 return NULL;
4531 }
4532
4533 /**
4534 * ata_device_add - Register hardware device with ATA and SCSI layers
4535 * @ent: Probe information describing hardware device to be registered
4536 *
4537 * This function processes the information provided in the probe
4538 * information struct @ent, allocates the necessary ATA and SCSI
4539 * host information structures, initializes them, and registers
4540 * everything with requisite kernel subsystems.
4541 *
4542 * This function requests irqs, probes the ATA bus, and probes
4543 * the SCSI bus.
4544 *
4545 * LOCKING:
4546 * PCI/etc. bus probe sem.
4547 *
4548 * RETURNS:
4549 * Number of ports registered. Zero on error (no ports registered).
4550 */
4551
4552 int ata_device_add(const struct ata_probe_ent *ent)
4553 {
4554 unsigned int count = 0, i;
4555 struct device *dev = ent->dev;
4556 struct ata_host_set *host_set;
4557
4558 DPRINTK("ENTER\n");
4559 /* alloc a container for our list of ATA ports (buses) */
4560 host_set = kzalloc(sizeof(struct ata_host_set) +
4561 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4562 if (!host_set)
4563 return 0;
4564 spin_lock_init(&host_set->lock);
4565
4566 host_set->dev = dev;
4567 host_set->n_ports = ent->n_ports;
4568 host_set->irq = ent->irq;
4569 host_set->mmio_base = ent->mmio_base;
4570 host_set->private_data = ent->private_data;
4571 host_set->ops = ent->port_ops;
4572 host_set->flags = ent->host_set_flags;
4573
4574 /* register each port bound to this device */
4575 for (i = 0; i < ent->n_ports; i++) {
4576 struct ata_port *ap;
4577 unsigned long xfer_mode_mask;
4578
4579 ap = ata_host_add(ent, host_set, i);
4580 if (!ap)
4581 goto err_out;
4582
4583 host_set->ports[i] = ap;
4584 		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4585 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4586 (ap->pio_mask << ATA_SHIFT_PIO);
4587
4588 /* print per-port info to dmesg */
4589 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4590 "bmdma 0x%lX irq %lu\n",
4591 ap->id,
4592 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4593 ata_mode_string(xfer_mode_mask),
4594 ap->ioaddr.cmd_addr,
4595 ap->ioaddr.ctl_addr,
4596 ap->ioaddr.bmdma_addr,
4597 ent->irq);
4598
4599 ata_chk_status(ap);
4600 host_set->ops->irq_clear(ap);
4601 count++;
4602 }
4603
4604 if (!count)
4605 goto err_free_ret;
4606
4607 	/* obtain the irq that is shared between channels */
4608 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4609 DRV_NAME, host_set))
4610 goto err_out;
4611
4612 /* perform each probe synchronously */
4613 DPRINTK("probe begin\n");
4614 for (i = 0; i < count; i++) {
4615 struct ata_port *ap;
4616 int rc;
4617
4618 ap = host_set->ports[i];
4619
4620 DPRINTK("ata%u: bus probe begin\n", ap->id);
4621 rc = ata_bus_probe(ap);
4622 DPRINTK("ata%u: bus probe end\n", ap->id);
4623
4624 if (rc) {
4625 /* FIXME: do something useful here?
4626 * Current libata behavior will
4627 * tear down everything when
4628 * the module is removed
4629 * or the h/w is unplugged.
4630 */
4631 }
4632
4633 rc = scsi_add_host(ap->host, dev);
4634 if (rc) {
4635 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4636 ap->id);
4637 /* FIXME: do something useful here */
4638 /* FIXME: handle unconditional calls to
4639 * scsi_scan_host and ata_host_remove, below,
4640 * at the very least
4641 */
4642 }
4643 }
4644
4645 /* probes are done, now scan each port's disk(s) */
4646 DPRINTK("host probe begin\n");
4647 for (i = 0; i < count; i++) {
4648 struct ata_port *ap = host_set->ports[i];
4649
4650 ata_scsi_scan_host(ap);
4651 }
4652
4653 dev_set_drvdata(dev, host_set);
4654
4655 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4656 return ent->n_ports; /* success */
4657
4658 err_out:
4659 for (i = 0; i < count; i++) {
4660 ata_host_remove(host_set->ports[i], 1);
4661 scsi_host_put(host_set->ports[i]->host);
4662 }
4663 err_free_ret:
4664 kfree(host_set);
4665 VPRINTK("EXIT, returning 0\n");
4666 return 0;
4667 }
4668
4669 /**
4670 * ata_host_set_remove - PCI layer callback for device removal
4671 * @host_set: ATA host set that was removed
4672 *
4673 * Unregister all objects associated with this host set. Free those
4674 * objects.
4675 *
4676 * LOCKING:
4677 * Inherited from calling layer (may sleep).
4678 */
4679
4680 void ata_host_set_remove(struct ata_host_set *host_set)
4681 {
4682 struct ata_port *ap;
4683 unsigned int i;
4684
4685 for (i = 0; i < host_set->n_ports; i++) {
4686 ap = host_set->ports[i];
4687 scsi_remove_host(ap->host);
4688 }
4689
4690 free_irq(host_set->irq, host_set);
4691
4692 for (i = 0; i < host_set->n_ports; i++) {
4693 ap = host_set->ports[i];
4694
4695 ata_scsi_release(ap->host);
4696
4697 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4698 struct ata_ioports *ioaddr = &ap->ioaddr;
4699
4700 if (ioaddr->cmd_addr == 0x1f0)
4701 release_region(0x1f0, 8);
4702 else if (ioaddr->cmd_addr == 0x170)
4703 release_region(0x170, 8);
4704 }
4705
4706 scsi_host_put(ap->host);
4707 }
4708
4709 if (host_set->ops->host_stop)
4710 host_set->ops->host_stop(host_set);
4711
4712 kfree(host_set);
4713 }
4714
4715 /**
4716 * ata_scsi_release - SCSI layer callback hook for host unload
4717 * @host: libata host to be unloaded
4718 *
4719 * Performs all duties necessary to shut down a libata port...
4720 * Kill port kthread, disable port, and release resources.
4721 *
4722 * LOCKING:
4723 * Inherited from SCSI layer.
4724 *
4725 * RETURNS:
4726 * One.
4727 */
4728
4729 int ata_scsi_release(struct Scsi_Host *host)
4730 {
4731 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4732 int i;
4733
4734 DPRINTK("ENTER\n");
4735
4736 ap->ops->port_disable(ap);
4737 ata_host_remove(ap, 0);
4738 for (i = 0; i < ATA_MAX_DEVICES; i++)
4739 kfree(ap->device[i].id);
4740
4741 DPRINTK("EXIT\n");
4742 return 1;
4743 }
4744
4745 /**
4746 * ata_std_ports - initialize ioaddr with standard port offsets.
4747 * @ioaddr: IO address structure to be initialized
4748 *
4749 * Utility function which initializes data_addr, error_addr,
4750 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4751 * device_addr, status_addr, and command_addr to standard offsets
4752 * relative to cmd_addr.
4753 *
4754 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4755 */
4756
4757 void ata_std_ports(struct ata_ioports *ioaddr)
4758 {
4759 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4760 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4761 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4762 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4763 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4764 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4765 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4766 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4767 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4768 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4769 }
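
/*
 * Illustrative sketch: for the primary legacy channel a driver fills
 * in the two base addresses itself and lets this helper derive the
 * rest (0x1f0/0x3f6 are the standard primary-channel addresses):
 *
 *	ioaddr->cmd_addr = 0x1f0;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
 *	ata_std_ports(ioaddr);
 */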
4770
4771
4772 #ifdef CONFIG_PCI
4773
4774 void ata_pci_host_stop (struct ata_host_set *host_set)
4775 {
4776 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4777
4778 pci_iounmap(pdev, host_set->mmio_base);
4779 }
4780
4781 /**
4782 * ata_pci_remove_one - PCI layer callback for device removal
4783 * @pdev: PCI device that was removed
4784 *
4785 * PCI layer indicates to libata via this hook that
4786 * a hot-unplug or module unload event has occurred.
4787 * Handle this by unregistering all objects associated
4788 * with this PCI device. Free those objects. Then finally
4789 * release PCI resources and disable device.
4790 *
4791 * LOCKING:
4792 * Inherited from PCI layer (may sleep).
4793 */
4794
4795 void ata_pci_remove_one (struct pci_dev *pdev)
4796 {
4797 struct device *dev = pci_dev_to_dev(pdev);
4798 struct ata_host_set *host_set = dev_get_drvdata(dev);
4799
4800 ata_host_set_remove(host_set);
4801 pci_release_regions(pdev);
4802 pci_disable_device(pdev);
4803 dev_set_drvdata(dev, NULL);
4804 }
4805
4806 /* move to PCI subsystem */
4807 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4808 {
4809 unsigned long tmp = 0;
4810
4811 switch (bits->width) {
4812 case 1: {
4813 u8 tmp8 = 0;
4814 pci_read_config_byte(pdev, bits->reg, &tmp8);
4815 tmp = tmp8;
4816 break;
4817 }
4818 case 2: {
4819 u16 tmp16 = 0;
4820 pci_read_config_word(pdev, bits->reg, &tmp16);
4821 tmp = tmp16;
4822 break;
4823 }
4824 case 4: {
4825 u32 tmp32 = 0;
4826 pci_read_config_dword(pdev, bits->reg, &tmp32);
4827 tmp = tmp32;
4828 break;
4829 }
4830
4831 default:
4832 return -EINVAL;
4833 }
4834
4835 tmp &= bits->mask;
4836
4837 return (tmp == bits->val) ? 1 : 0;
4838 }
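
/*
 * Illustrative sketch: a caller describes the config-space bits it
 * cares about and tests them before enabling a port.  The values are
 * an example only (a PIIX-style "port enable" bit), laid out as
 * { reg, width, mask, val }.
 *
 *	static const struct pci_bits port_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &port_enable_bits))
 *		return -ENODEV;	(port disabled in firmware)
 */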
4839
4840 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4841 {
4842 pci_save_state(pdev);
4843 pci_disable_device(pdev);
4844 pci_set_power_state(pdev, PCI_D3hot);
4845 return 0;
4846 }
4847
4848 int ata_pci_device_resume(struct pci_dev *pdev)
4849 {
4850 pci_set_power_state(pdev, PCI_D0);
4851 pci_restore_state(pdev);
4852 pci_enable_device(pdev);
4853 pci_set_master(pdev);
4854 return 0;
4855 }
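
/*
 * Illustrative sketch: a PCI low-level driver wires the helpers in
 * this block into its pci_driver; "my_pci_driver" is hypothetical.
 *
 *	static struct pci_driver my_pci_driver = {
 *		...
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */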
4856 #endif /* CONFIG_PCI */
4857
4858
4859 static int __init ata_init(void)
4860 {
4861 ata_wq = create_workqueue("ata");
4862 if (!ata_wq)
4863 return -ENOMEM;
4864
4865 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4866 return 0;
4867 }
4868
4869 static void __exit ata_exit(void)
4870 {
4871 destroy_workqueue(ata_wq);
4872 }
4873
4874 module_init(ata_init);
4875 module_exit(ata_exit);
4876
4877 static unsigned long ratelimit_time;
4878 static DEFINE_SPINLOCK(ata_ratelimit_lock);
4879
4880 int ata_ratelimit(void)
4881 {
4882 int rc;
4883 unsigned long flags;
4884
4885 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4886
4887 if (time_after(jiffies, ratelimit_time)) {
4888 rc = 1;
4889 ratelimit_time = jiffies + (HZ/5);
4890 } else
4891 rc = 0;
4892
4893 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4894
4895 return rc;
4896 }
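
/*
 * Illustrative sketch: callers use the return value to throttle noisy
 * messages to roughly five per second, e.g.
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */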
4897
4898 /*
4899 * libata is essentially a library of internal helper functions for
4900 * low-level ATA host controller drivers. As such, the API/ABI is
4901 * likely to change as new drivers are added and updated.
4902 * Do not depend on ABI/API stability.
4903 */
4904
4905 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4906 EXPORT_SYMBOL_GPL(ata_std_ports);
4907 EXPORT_SYMBOL_GPL(ata_device_add);
4908 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4909 EXPORT_SYMBOL_GPL(ata_sg_init);
4910 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4911 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4912 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4913 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4914 EXPORT_SYMBOL_GPL(ata_tf_load);
4915 EXPORT_SYMBOL_GPL(ata_tf_read);
4916 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4917 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4918 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4919 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4920 EXPORT_SYMBOL_GPL(ata_check_status);
4921 EXPORT_SYMBOL_GPL(ata_altstatus);
4922 EXPORT_SYMBOL_GPL(ata_exec_command);
4923 EXPORT_SYMBOL_GPL(ata_port_start);
4924 EXPORT_SYMBOL_GPL(ata_port_stop);
4925 EXPORT_SYMBOL_GPL(ata_host_stop);
4926 EXPORT_SYMBOL_GPL(ata_interrupt);
4927 EXPORT_SYMBOL_GPL(ata_qc_prep);
4928 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4929 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4930 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4931 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4932 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4933 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4934 EXPORT_SYMBOL_GPL(ata_port_probe);
4935 EXPORT_SYMBOL_GPL(sata_phy_reset);
4936 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4937 EXPORT_SYMBOL_GPL(ata_bus_reset);
4938 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4939 EXPORT_SYMBOL_GPL(ata_std_softreset);
4940 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4941 EXPORT_SYMBOL_GPL(ata_std_postreset);
4942 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4943 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4944 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4945 EXPORT_SYMBOL_GPL(ata_dev_classify);
4946 EXPORT_SYMBOL_GPL(ata_dev_pair);
4947 EXPORT_SYMBOL_GPL(ata_port_disable);
4948 EXPORT_SYMBOL_GPL(ata_ratelimit);
4949 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4950 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4951 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4952 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4953 EXPORT_SYMBOL_GPL(ata_scsi_error);
4954 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4955 EXPORT_SYMBOL_GPL(ata_scsi_release);
4956 EXPORT_SYMBOL_GPL(ata_host_intr);
4957 EXPORT_SYMBOL_GPL(ata_id_string);
4958 EXPORT_SYMBOL_GPL(ata_id_c_string);
4959 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4960 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4961 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4962
4963 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4964 EXPORT_SYMBOL_GPL(ata_timing_compute);
4965 EXPORT_SYMBOL_GPL(ata_timing_merge);
4966
4967 #ifdef CONFIG_PCI
4968 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4969 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4970 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4971 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4972 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4973 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4974 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4975 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4976 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4977 #endif /* CONFIG_PCI */
4978
4979 EXPORT_SYMBOL_GPL(ata_device_suspend);
4980 EXPORT_SYMBOL_GPL(ata_device_resume);
4981 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4982 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);