[PATCH] libata-dev: print out information for ATAPI devices with CDB interrupts
[deliverable/linux.git] / drivers / scsi / libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev,
66 u16 heads,
67 u16 sectors);
68 static void ata_set_mode(struct ata_port *ap);
69 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
70 struct ata_device *dev);
71 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
72
73 static unsigned int ata_unique_id = 1;
74 static struct workqueue_struct *ata_wq;
75
76 int atapi_enabled = 1;
77 module_param(atapi_enabled, int, 0444);
78 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
79
80 int libata_fua = 0;
81 module_param_named(fua, libata_fua, int, 0444);
82 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
83
84 MODULE_AUTHOR("Jeff Garzik");
85 MODULE_DESCRIPTION("Library module for ATA devices");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(DRV_VERSION);
88
89
90 /**
91 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
92 * @tf: Taskfile to convert
93 * @fis: Buffer into which data will output
94 * @pmp: Port multiplier port
95 *
96 * Converts a standard ATA taskfile to a Serial ATA
97 * FIS structure (Register - Host to Device).
98 *
99 * LOCKING:
100 * Inherited from caller.
101 */
102
103 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
104 {
105 fis[0] = 0x27; /* Register - Host to Device FIS */
106 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
107 bit 7 indicates Command FIS */
108 fis[2] = tf->command;
109 fis[3] = tf->feature;
110
111 fis[4] = tf->lbal;
112 fis[5] = tf->lbam;
113 fis[6] = tf->lbah;
114 fis[7] = tf->device;
115
116 fis[8] = tf->hob_lbal;
117 fis[9] = tf->hob_lbam;
118 fis[10] = tf->hob_lbah;
119 fis[11] = tf->hob_feature;
120
121 fis[12] = tf->nsect;
122 fis[13] = tf->hob_nsect;
123 fis[14] = 0;
124 fis[15] = tf->ctl;
125
126 fis[16] = 0;
127 fis[17] = 0;
128 fis[18] = 0;
129 fis[19] = 0;
130 }
131
132 /**
133 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
134 * @fis: Buffer from which data will be input
135 * @tf: Taskfile to output
136 *
137 * Converts a serial ATA FIS structure to a standard ATA taskfile.
138 *
139 * LOCKING:
140 * Inherited from caller.
141 */
142
143 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
144 {
145 tf->command = fis[2]; /* status */
146 tf->feature = fis[3]; /* error */
147
148 tf->lbal = fis[4];
149 tf->lbam = fis[5];
150 tf->lbah = fis[6];
151 tf->device = fis[7];
152
153 tf->hob_lbal = fis[8];
154 tf->hob_lbam = fis[9];
155 tf->hob_lbah = fis[10];
156
157 tf->nsect = fis[12];
158 tf->hob_nsect = fis[13];
159 }
160
161 static const u8 ata_rw_cmds[] = {
162 /* pio multi */
163 ATA_CMD_READ_MULTI,
164 ATA_CMD_WRITE_MULTI,
165 ATA_CMD_READ_MULTI_EXT,
166 ATA_CMD_WRITE_MULTI_EXT,
167 0,
168 0,
169 0,
170 ATA_CMD_WRITE_MULTI_FUA_EXT,
171 /* pio */
172 ATA_CMD_PIO_READ,
173 ATA_CMD_PIO_WRITE,
174 ATA_CMD_PIO_READ_EXT,
175 ATA_CMD_PIO_WRITE_EXT,
176 0,
177 0,
178 0,
179 0,
180 /* dma */
181 ATA_CMD_READ,
182 ATA_CMD_WRITE,
183 ATA_CMD_READ_EXT,
184 ATA_CMD_WRITE_EXT,
185 0,
186 0,
187 0,
188 ATA_CMD_WRITE_FUA_EXT
189 };
190
191 /**
192 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
193 * @qc: command to examine and configure
194 *
195 * Examine the device configuration and tf->flags to calculate
196 * the proper read/write commands and protocol to use.
197 *
198 * LOCKING:
199 * caller.
200 */
201 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
202 {
203 struct ata_taskfile *tf = &qc->tf;
204 struct ata_device *dev = qc->dev;
205 u8 cmd;
206
207 int index, fua, lba48, write;
208
209 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
210 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
211 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
212
213 if (dev->flags & ATA_DFLAG_PIO) {
214 tf->protocol = ATA_PROT_PIO;
215 index = dev->multi_count ? 0 : 8;
216 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
217 /* Unable to use DMA due to host limitation */
218 tf->protocol = ATA_PROT_PIO;
219 index = dev->multi_count ? 0 : 8;
220 } else {
221 tf->protocol = ATA_PROT_DMA;
222 index = 16;
223 }
224
225 cmd = ata_rw_cmds[index + fua + lba48 + write];
226 if (cmd) {
227 tf->command = cmd;
228 return 0;
229 }
230 return -1;
231 }
232
233 /**
234 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
235 * @pio_mask: pio_mask
236 * @mwdma_mask: mwdma_mask
237 * @udma_mask: udma_mask
238 *
239 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
240 * unsigned int xfer_mask.
241 *
242 * LOCKING:
243 * None.
244 *
245 * RETURNS:
246 * Packed xfer_mask.
247 */
248 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
249 unsigned int mwdma_mask,
250 unsigned int udma_mask)
251 {
252 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
253 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
254 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
255 }
256
257 /**
258 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
259 * @xfer_mask: xfer_mask to unpack
260 * @pio_mask: resulting pio_mask
261 * @mwdma_mask: resulting mwdma_mask
262 * @udma_mask: resulting udma_mask
263 *
264 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
265 * Any NULL distination masks will be ignored.
266 */
267 static void ata_unpack_xfermask(unsigned int xfer_mask,
268 unsigned int *pio_mask,
269 unsigned int *mwdma_mask,
270 unsigned int *udma_mask)
271 {
272 if (pio_mask)
273 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
274 if (mwdma_mask)
275 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
276 if (udma_mask)
277 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
278 }
279
280 static const struct ata_xfer_ent {
281 unsigned int shift, bits;
282 u8 base;
283 } ata_xfer_tbl[] = {
284 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
285 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
286 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
287 { -1, },
288 };
289
290 /**
291 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
292 * @xfer_mask: xfer_mask of interest
293 *
294 * Return matching XFER_* value for @xfer_mask. Only the highest
295 * bit of @xfer_mask is considered.
296 *
297 * LOCKING:
298 * None.
299 *
300 * RETURNS:
301 * Matching XFER_* value, 0 if no match found.
302 */
303 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
304 {
305 int highbit = fls(xfer_mask) - 1;
306 const struct ata_xfer_ent *ent;
307
308 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
309 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
310 return ent->base + highbit - ent->shift;
311 return 0;
312 }
313
314 /**
315 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
316 * @xfer_mode: XFER_* of interest
317 *
318 * Return matching xfer_mask for @xfer_mode.
319 *
320 * LOCKING:
321 * None.
322 *
323 * RETURNS:
324 * Matching xfer_mask, 0 if no match found.
325 */
326 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
327 {
328 const struct ata_xfer_ent *ent;
329
330 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
331 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
332 return 1 << (ent->shift + xfer_mode - ent->base);
333 return 0;
334 }
335
336 /**
337 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
338 * @xfer_mode: XFER_* of interest
339 *
340 * Return matching xfer_shift for @xfer_mode.
341 *
342 * LOCKING:
343 * None.
344 *
345 * RETURNS:
346 * Matching xfer_shift, -1 if no match found.
347 */
348 static int ata_xfer_mode2shift(unsigned int xfer_mode)
349 {
350 const struct ata_xfer_ent *ent;
351
352 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
353 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
354 return ent->shift;
355 return -1;
356 }
357
358 /**
359 * ata_mode_string - convert xfer_mask to string
360 * @xfer_mask: mask of bits supported; only highest bit counts.
361 *
362 * Determine string which represents the highest speed
363 * (highest bit in @modemask).
364 *
365 * LOCKING:
366 * None.
367 *
368 * RETURNS:
369 * Constant C string representing highest speed listed in
370 * @mode_mask, or the constant C string "<n/a>".
371 */
372 static const char *ata_mode_string(unsigned int xfer_mask)
373 {
374 static const char * const xfer_mode_str[] = {
375 "PIO0",
376 "PIO1",
377 "PIO2",
378 "PIO3",
379 "PIO4",
380 "MWDMA0",
381 "MWDMA1",
382 "MWDMA2",
383 "UDMA/16",
384 "UDMA/25",
385 "UDMA/33",
386 "UDMA/44",
387 "UDMA/66",
388 "UDMA/100",
389 "UDMA/133",
390 "UDMA7",
391 };
392 int highbit;
393
394 highbit = fls(xfer_mask) - 1;
395 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
396 return xfer_mode_str[highbit];
397 return "<n/a>";
398 }
399
400 static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
401 {
402 if (ata_dev_present(dev)) {
403 printk(KERN_WARNING "ata%u: dev %u disabled\n",
404 ap->id, dev->devno);
405 dev->class++;
406 }
407 }
408
409 /**
410 * ata_pio_devchk - PATA device presence detection
411 * @ap: ATA channel to examine
412 * @device: Device to examine (starting at zero)
413 *
414 * This technique was originally described in
415 * Hale Landis's ATADRVR (www.ata-atapi.com), and
416 * later found its way into the ATA/ATAPI spec.
417 *
418 * Write a pattern to the ATA shadow registers,
419 * and if a device is present, it will respond by
420 * correctly storing and echoing back the
421 * ATA shadow register contents.
422 *
423 * LOCKING:
424 * caller.
425 */
426
427 static unsigned int ata_pio_devchk(struct ata_port *ap,
428 unsigned int device)
429 {
430 struct ata_ioports *ioaddr = &ap->ioaddr;
431 u8 nsect, lbal;
432
433 ap->ops->dev_select(ap, device);
434
435 outb(0x55, ioaddr->nsect_addr);
436 outb(0xaa, ioaddr->lbal_addr);
437
438 outb(0xaa, ioaddr->nsect_addr);
439 outb(0x55, ioaddr->lbal_addr);
440
441 outb(0x55, ioaddr->nsect_addr);
442 outb(0xaa, ioaddr->lbal_addr);
443
444 nsect = inb(ioaddr->nsect_addr);
445 lbal = inb(ioaddr->lbal_addr);
446
447 if ((nsect == 0x55) && (lbal == 0xaa))
448 return 1; /* we found a device */
449
450 return 0; /* nothing found */
451 }
452
453 /**
454 * ata_mmio_devchk - PATA device presence detection
455 * @ap: ATA channel to examine
456 * @device: Device to examine (starting at zero)
457 *
458 * This technique was originally described in
459 * Hale Landis's ATADRVR (www.ata-atapi.com), and
460 * later found its way into the ATA/ATAPI spec.
461 *
462 * Write a pattern to the ATA shadow registers,
463 * and if a device is present, it will respond by
464 * correctly storing and echoing back the
465 * ATA shadow register contents.
466 *
467 * LOCKING:
468 * caller.
469 */
470
471 static unsigned int ata_mmio_devchk(struct ata_port *ap,
472 unsigned int device)
473 {
474 struct ata_ioports *ioaddr = &ap->ioaddr;
475 u8 nsect, lbal;
476
477 ap->ops->dev_select(ap, device);
478
479 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
480 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
481
482 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
483 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
484
485 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
486 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
487
488 nsect = readb((void __iomem *) ioaddr->nsect_addr);
489 lbal = readb((void __iomem *) ioaddr->lbal_addr);
490
491 if ((nsect == 0x55) && (lbal == 0xaa))
492 return 1; /* we found a device */
493
494 return 0; /* nothing found */
495 }
496
497 /**
498 * ata_devchk - PATA device presence detection
499 * @ap: ATA channel to examine
500 * @device: Device to examine (starting at zero)
501 *
502 * Dispatch ATA device presence detection, depending
503 * on whether we are using PIO or MMIO to talk to the
504 * ATA shadow registers.
505 *
506 * LOCKING:
507 * caller.
508 */
509
510 static unsigned int ata_devchk(struct ata_port *ap,
511 unsigned int device)
512 {
513 if (ap->flags & ATA_FLAG_MMIO)
514 return ata_mmio_devchk(ap, device);
515 return ata_pio_devchk(ap, device);
516 }
517
518 /**
519 * ata_dev_classify - determine device type based on ATA-spec signature
520 * @tf: ATA taskfile register set for device to be identified
521 *
522 * Determine from taskfile register contents whether a device is
523 * ATA or ATAPI, as per "Signature and persistence" section
524 * of ATA/PI spec (volume 1, sect 5.14).
525 *
526 * LOCKING:
527 * None.
528 *
529 * RETURNS:
530 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
531 * the event of failure.
532 */
533
534 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
535 {
536 /* Apple's open source Darwin code hints that some devices only
537 * put a proper signature into the LBA mid/high registers,
538 * So, we only check those. It's sufficient for uniqueness.
539 */
540
541 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
542 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
543 DPRINTK("found ATA device by sig\n");
544 return ATA_DEV_ATA;
545 }
546
547 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
548 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
549 DPRINTK("found ATAPI device by sig\n");
550 return ATA_DEV_ATAPI;
551 }
552
553 DPRINTK("unknown device\n");
554 return ATA_DEV_UNKNOWN;
555 }
556
557 /**
558 * ata_dev_try_classify - Parse returned ATA device signature
559 * @ap: ATA channel to examine
560 * @device: Device to examine (starting at zero)
561 * @r_err: Value of error register on completion
562 *
563 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
564 * an ATA/ATAPI-defined set of values is placed in the ATA
565 * shadow registers, indicating the results of device detection
566 * and diagnostics.
567 *
568 * Select the ATA device, and read the values from the ATA shadow
569 * registers. Then parse according to the Error register value,
570 * and the spec-defined values examined by ata_dev_classify().
571 *
572 * LOCKING:
573 * caller.
574 *
575 * RETURNS:
576 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
577 */
578
579 static unsigned int
580 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
581 {
582 struct ata_taskfile tf;
583 unsigned int class;
584 u8 err;
585
586 ap->ops->dev_select(ap, device);
587
588 memset(&tf, 0, sizeof(tf));
589
590 ap->ops->tf_read(ap, &tf);
591 err = tf.feature;
592 if (r_err)
593 *r_err = err;
594
595 /* see if device passed diags */
596 if (err == 1)
597 /* do nothing */ ;
598 else if ((device == 0) && (err == 0x81))
599 /* do nothing */ ;
600 else
601 return ATA_DEV_NONE;
602
603 /* determine if device is ATA or ATAPI */
604 class = ata_dev_classify(&tf);
605
606 if (class == ATA_DEV_UNKNOWN)
607 return ATA_DEV_NONE;
608 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
609 return ATA_DEV_NONE;
610 return class;
611 }
612
613 /**
614 * ata_id_string - Convert IDENTIFY DEVICE page into string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
618 * @len: length of string to return. must be an even number.
619 *
620 * The strings in the IDENTIFY DEVICE page are broken up into
621 * 16-bit chunks. Run through the string, and output each
622 * 8-bit chunk linearly, regardless of platform.
623 *
624 * LOCKING:
625 * caller.
626 */
627
628 void ata_id_string(const u16 *id, unsigned char *s,
629 unsigned int ofs, unsigned int len)
630 {
631 unsigned int c;
632
633 while (len > 0) {
634 c = id[ofs] >> 8;
635 *s = c;
636 s++;
637
638 c = id[ofs] & 0xff;
639 *s = c;
640 s++;
641
642 ofs++;
643 len -= 2;
644 }
645 }
646
647 /**
648 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
649 * @id: IDENTIFY DEVICE results we will examine
650 * @s: string into which data is output
651 * @ofs: offset into identify device page
652 * @len: length of string to return. must be an odd number.
653 *
654 * This function is identical to ata_id_string except that it
655 * trims trailing spaces and terminates the resulting string with
656 * null. @len must be actual maximum length (even number) + 1.
657 *
658 * LOCKING:
659 * caller.
660 */
661 void ata_id_c_string(const u16 *id, unsigned char *s,
662 unsigned int ofs, unsigned int len)
663 {
664 unsigned char *p;
665
666 WARN_ON(!(len & 1));
667
668 ata_id_string(id, s, ofs, len - 1);
669
670 p = s + strnlen(s, len - 1);
671 while (p > s && p[-1] == ' ')
672 p--;
673 *p = '\0';
674 }
675
676 static u64 ata_id_n_sectors(const u16 *id)
677 {
678 if (ata_id_has_lba(id)) {
679 if (ata_id_has_lba48(id))
680 return ata_id_u64(id, 100);
681 else
682 return ata_id_u32(id, 60);
683 } else {
684 if (ata_id_current_chs_valid(id))
685 return ata_id_u32(id, 57);
686 else
687 return id[1] * id[3] * id[6];
688 }
689 }
690
691 /**
692 * ata_noop_dev_select - Select device 0/1 on ATA bus
693 * @ap: ATA channel to manipulate
694 * @device: ATA device (numbered from zero) to select
695 *
696 * This function performs no actual function.
697 *
698 * May be used as the dev_select() entry in ata_port_operations.
699 *
700 * LOCKING:
701 * caller.
702 */
703 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
704 {
705 }
706
707
708 /**
709 * ata_std_dev_select - Select device 0/1 on ATA bus
710 * @ap: ATA channel to manipulate
711 * @device: ATA device (numbered from zero) to select
712 *
713 * Use the method defined in the ATA specification to
714 * make either device 0, or device 1, active on the
715 * ATA channel. Works with both PIO and MMIO.
716 *
717 * May be used as the dev_select() entry in ata_port_operations.
718 *
719 * LOCKING:
720 * caller.
721 */
722
723 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
724 {
725 u8 tmp;
726
727 if (device == 0)
728 tmp = ATA_DEVICE_OBS;
729 else
730 tmp = ATA_DEVICE_OBS | ATA_DEV1;
731
732 if (ap->flags & ATA_FLAG_MMIO) {
733 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
734 } else {
735 outb(tmp, ap->ioaddr.device_addr);
736 }
737 ata_pause(ap); /* needed; also flushes, for mmio */
738 }
739
740 /**
741 * ata_dev_select - Select device 0/1 on ATA bus
742 * @ap: ATA channel to manipulate
743 * @device: ATA device (numbered from zero) to select
744 * @wait: non-zero to wait for Status register BSY bit to clear
745 * @can_sleep: non-zero if context allows sleeping
746 *
747 * Use the method defined in the ATA specification to
748 * make either device 0, or device 1, active on the
749 * ATA channel.
750 *
751 * This is a high-level version of ata_std_dev_select(),
752 * which additionally provides the services of inserting
753 * the proper pauses and status polling, where needed.
754 *
755 * LOCKING:
756 * caller.
757 */
758
759 void ata_dev_select(struct ata_port *ap, unsigned int device,
760 unsigned int wait, unsigned int can_sleep)
761 {
762 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
763 ap->id, device, wait);
764
765 if (wait)
766 ata_wait_idle(ap);
767
768 ap->ops->dev_select(ap, device);
769
770 if (wait) {
771 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
772 msleep(150);
773 ata_wait_idle(ap);
774 }
775 }
776
777 /**
778 * ata_dump_id - IDENTIFY DEVICE info debugging output
779 * @id: IDENTIFY DEVICE page to dump
780 *
781 * Dump selected 16-bit words from the given IDENTIFY DEVICE
782 * page.
783 *
784 * LOCKING:
785 * caller.
786 */
787
788 static inline void ata_dump_id(const u16 *id)
789 {
790 DPRINTK("49==0x%04x "
791 "53==0x%04x "
792 "63==0x%04x "
793 "64==0x%04x "
794 "75==0x%04x \n",
795 id[49],
796 id[53],
797 id[63],
798 id[64],
799 id[75]);
800 DPRINTK("80==0x%04x "
801 "81==0x%04x "
802 "82==0x%04x "
803 "83==0x%04x "
804 "84==0x%04x \n",
805 id[80],
806 id[81],
807 id[82],
808 id[83],
809 id[84]);
810 DPRINTK("88==0x%04x "
811 "93==0x%04x\n",
812 id[88],
813 id[93]);
814 }
815
816 /**
817 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
818 * @id: IDENTIFY data to compute xfer mask from
819 *
820 * Compute the xfermask for this device. This is not as trivial
821 * as it seems if we must consider early devices correctly.
822 *
823 * FIXME: pre IDE drive timing (do we care ?).
824 *
825 * LOCKING:
826 * None.
827 *
828 * RETURNS:
829 * Computed xfermask
830 */
831 static unsigned int ata_id_xfermask(const u16 *id)
832 {
833 unsigned int pio_mask, mwdma_mask, udma_mask;
834
835 /* Usual case. Word 53 indicates word 64 is valid */
836 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
837 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
838 pio_mask <<= 3;
839 pio_mask |= 0x7;
840 } else {
841 /* If word 64 isn't valid then Word 51 high byte holds
842 * the PIO timing number for the maximum. Turn it into
843 * a mask.
844 */
845 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
846
847 /* But wait.. there's more. Design your standards by
848 * committee and you too can get a free iordy field to
849 * process. However its the speeds not the modes that
850 * are supported... Note drivers using the timing API
851 * will get this right anyway
852 */
853 }
854
855 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
856
857 udma_mask = 0;
858 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
859 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
860
861 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
862 }
863
864 /**
865 * ata_port_queue_task - Queue port_task
866 * @ap: The ata_port to queue port_task for
867 *
868 * Schedule @fn(@data) for execution after @delay jiffies using
869 * port_task. There is one port_task per port and it's the
870 * user(low level driver)'s responsibility to make sure that only
871 * one task is active at any given time.
872 *
873 * libata core layer takes care of synchronization between
874 * port_task and EH. ata_port_queue_task() may be ignored for EH
875 * synchronization.
876 *
877 * LOCKING:
878 * Inherited from caller.
879 */
880 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
881 unsigned long delay)
882 {
883 int rc;
884
885 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
886 return;
887
888 PREPARE_WORK(&ap->port_task, fn, data);
889
890 if (!delay)
891 rc = queue_work(ata_wq, &ap->port_task);
892 else
893 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
894
895 /* rc == 0 means that another user is using port task */
896 WARN_ON(rc == 0);
897 }
898
899 /**
900 * ata_port_flush_task - Flush port_task
901 * @ap: The ata_port to flush port_task for
902 *
903 * After this function completes, port_task is guranteed not to
904 * be running or scheduled.
905 *
906 * LOCKING:
907 * Kernel thread context (may sleep)
908 */
909 void ata_port_flush_task(struct ata_port *ap)
910 {
911 unsigned long flags;
912
913 DPRINTK("ENTER\n");
914
915 spin_lock_irqsave(&ap->host_set->lock, flags);
916 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
917 spin_unlock_irqrestore(&ap->host_set->lock, flags);
918
919 DPRINTK("flush #1\n");
920 flush_workqueue(ata_wq);
921
922 /*
923 * At this point, if a task is running, it's guaranteed to see
924 * the FLUSH flag; thus, it will never queue pio tasks again.
925 * Cancel and flush.
926 */
927 if (!cancel_delayed_work(&ap->port_task)) {
928 DPRINTK("flush #2\n");
929 flush_workqueue(ata_wq);
930 }
931
932 spin_lock_irqsave(&ap->host_set->lock, flags);
933 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
934 spin_unlock_irqrestore(&ap->host_set->lock, flags);
935
936 DPRINTK("EXIT\n");
937 }
938
939 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
940 {
941 struct completion *waiting = qc->private_data;
942
943 qc->ap->ops->tf_read(qc->ap, &qc->tf);
944 complete(waiting);
945 }
946
947 /**
948 * ata_exec_internal - execute libata internal command
949 * @ap: Port to which the command is sent
950 * @dev: Device to which the command is sent
951 * @tf: Taskfile registers for the command and the result
952 * @dma_dir: Data tranfer direction of the command
953 * @buf: Data buffer of the command
954 * @buflen: Length of data buffer
955 *
956 * Executes libata internal command with timeout. @tf contains
957 * command on entry and result on return. Timeout and error
958 * conditions are reported via return value. No recovery action
959 * is taken after a command times out. It's caller's duty to
960 * clean up after timeout.
961 *
962 * LOCKING:
963 * None. Should be called with kernel context, might sleep.
964 */
965
966 static unsigned
967 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
968 struct ata_taskfile *tf,
969 int dma_dir, void *buf, unsigned int buflen)
970 {
971 u8 command = tf->command;
972 struct ata_queued_cmd *qc;
973 DECLARE_COMPLETION(wait);
974 unsigned long flags;
975 unsigned int err_mask;
976
977 spin_lock_irqsave(&ap->host_set->lock, flags);
978
979 qc = ata_qc_new_init(ap, dev);
980 BUG_ON(qc == NULL);
981
982 qc->tf = *tf;
983 qc->dma_dir = dma_dir;
984 if (dma_dir != DMA_NONE) {
985 ata_sg_init_one(qc, buf, buflen);
986 qc->nsect = buflen / ATA_SECT_SIZE;
987 }
988
989 qc->private_data = &wait;
990 qc->complete_fn = ata_qc_complete_internal;
991
992 qc->err_mask = ata_qc_issue(qc);
993 if (qc->err_mask)
994 ata_qc_complete(qc);
995
996 spin_unlock_irqrestore(&ap->host_set->lock, flags);
997
998 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
999 ata_port_flush_task(ap);
1000
1001 spin_lock_irqsave(&ap->host_set->lock, flags);
1002
1003 /* We're racing with irq here. If we lose, the
1004 * following test prevents us from completing the qc
1005 * again. If completion irq occurs after here but
1006 * before the caller cleans up, it will result in a
1007 * spurious interrupt. We can live with that.
1008 */
1009 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1010 qc->err_mask = AC_ERR_TIMEOUT;
1011 ata_qc_complete(qc);
1012 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1013 ap->id, command);
1014 }
1015
1016 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1017 }
1018
1019 *tf = qc->tf;
1020 err_mask = qc->err_mask;
1021
1022 ata_qc_free(qc);
1023
1024 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1025 * Until those drivers are fixed, we detect the condition
1026 * here, fail the command with AC_ERR_SYSTEM and reenable the
1027 * port.
1028 *
1029 * Note that this doesn't change any behavior as internal
1030 * command failure results in disabling the device in the
1031 * higher layer for LLDDs without new reset/EH callbacks.
1032 *
1033 * Kill the following code as soon as those drivers are fixed.
1034 */
1035 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1036 err_mask |= AC_ERR_SYSTEM;
1037 ata_port_probe(ap);
1038 }
1039
1040 return err_mask;
1041 }
1042
1043 /**
1044 * ata_pio_need_iordy - check if iordy needed
1045 * @adev: ATA device
1046 *
1047 * Check if the current speed of the device requires IORDY. Used
1048 * by various controllers for chip configuration.
1049 */
1050
1051 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1052 {
1053 int pio;
1054 int speed = adev->pio_mode - XFER_PIO_0;
1055
1056 if (speed < 2)
1057 return 0;
1058 if (speed > 2)
1059 return 1;
1060
1061 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1062
1063 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1064 pio = adev->id[ATA_ID_EIDE_PIO];
1065 /* Is the speed faster than the drive allows non IORDY ? */
1066 if (pio) {
1067 /* This is cycle times not frequency - watch the logic! */
1068 if (pio > 240) /* PIO2 is 240nS per cycle */
1069 return 1;
1070 return 0;
1071 }
1072 }
1073 return 0;
1074 }
1075
1076 /**
1077 * ata_dev_read_id - Read ID data from the specified device
1078 * @ap: port on which target device resides
1079 * @dev: target device
1080 * @p_class: pointer to class of the target device (may be changed)
1081 * @post_reset: is this read ID post-reset?
1082 * @p_id: read IDENTIFY page (newly allocated)
1083 *
1084 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1085 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1086 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1087 * for pre-ATA4 drives.
1088 *
1089 * LOCKING:
1090 * Kernel thread context (may sleep)
1091 *
1092 * RETURNS:
1093 * 0 on success, -errno otherwise.
1094 */
1095 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1096 unsigned int *p_class, int post_reset, u16 **p_id)
1097 {
1098 unsigned int class = *p_class;
1099 struct ata_taskfile tf;
1100 unsigned int err_mask = 0;
1101 u16 *id;
1102 const char *reason;
1103 int rc;
1104
1105 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1106
1107 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1108
1109 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1110 if (id == NULL) {
1111 rc = -ENOMEM;
1112 reason = "out of memory";
1113 goto err_out;
1114 }
1115
1116 retry:
1117 ata_tf_init(ap, &tf, dev->devno);
1118
1119 switch (class) {
1120 case ATA_DEV_ATA:
1121 tf.command = ATA_CMD_ID_ATA;
1122 break;
1123 case ATA_DEV_ATAPI:
1124 tf.command = ATA_CMD_ID_ATAPI;
1125 break;
1126 default:
1127 rc = -ENODEV;
1128 reason = "unsupported class";
1129 goto err_out;
1130 }
1131
1132 tf.protocol = ATA_PROT_PIO;
1133
1134 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1135 id, sizeof(id[0]) * ATA_ID_WORDS);
1136 if (err_mask) {
1137 rc = -EIO;
1138 reason = "I/O error";
1139 goto err_out;
1140 }
1141
1142 swap_buf_le16(id, ATA_ID_WORDS);
1143
1144 /* sanity check */
1145 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1146 rc = -EINVAL;
1147 reason = "device reports illegal type";
1148 goto err_out;
1149 }
1150
1151 if (post_reset && class == ATA_DEV_ATA) {
1152 /*
1153 * The exact sequence expected by certain pre-ATA4 drives is:
1154 * SRST RESET
1155 * IDENTIFY
1156 * INITIALIZE DEVICE PARAMETERS
1157 * anything else..
1158 * Some drives were very specific about that exact sequence.
1159 */
1160 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1161 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
1162 if (err_mask) {
1163 rc = -EIO;
1164 reason = "INIT_DEV_PARAMS failed";
1165 goto err_out;
1166 }
1167
1168 /* current CHS translation info (id[53-58]) might be
1169 * changed. reread the identify device info.
1170 */
1171 post_reset = 0;
1172 goto retry;
1173 }
1174 }
1175
1176 *p_class = class;
1177 *p_id = id;
1178 return 0;
1179
1180 err_out:
1181 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1182 ap->id, dev->devno, reason);
1183 kfree(id);
1184 return rc;
1185 }
1186
1187 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1188 struct ata_device *dev)
1189 {
1190 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1191 }
1192
1193 /**
1194 * ata_dev_configure - Configure the specified ATA/ATAPI device
1195 * @ap: Port on which target device resides
1196 * @dev: Target device to configure
1197 * @print_info: Enable device info printout
1198 *
1199 * Configure @dev according to @dev->id. Generic and low-level
1200 * driver specific fixups are also applied.
1201 *
1202 * LOCKING:
1203 * Kernel thread context (may sleep)
1204 *
1205 * RETURNS:
1206 * 0 on success, -errno otherwise
1207 */
1208 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1209 int print_info)
1210 {
1211 const u16 *id = dev->id;
1212 unsigned int xfer_mask;
1213 int i, rc;
1214
1215 if (!ata_dev_present(dev)) {
1216 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1217 ap->id, dev->devno);
1218 return 0;
1219 }
1220
1221 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1222
1223 /* print device capabilities */
1224 if (print_info)
1225 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1226 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1227 ap->id, dev->devno, id[49], id[82], id[83],
1228 id[84], id[85], id[86], id[87], id[88]);
1229
1230 /* initialize to-be-configured parameters */
1231 dev->flags = 0;
1232 dev->max_sectors = 0;
1233 dev->cdb_len = 0;
1234 dev->n_sectors = 0;
1235 dev->cylinders = 0;
1236 dev->heads = 0;
1237 dev->sectors = 0;
1238
1239 /*
1240 * common ATA, ATAPI feature tests
1241 */
1242
1243 /* find max transfer mode; for printk only */
1244 xfer_mask = ata_id_xfermask(id);
1245
1246 ata_dump_id(id);
1247
1248 /* ATA-specific feature tests */
1249 if (dev->class == ATA_DEV_ATA) {
1250 dev->n_sectors = ata_id_n_sectors(id);
1251
1252 if (ata_id_has_lba(id)) {
1253 const char *lba_desc;
1254
1255 lba_desc = "LBA";
1256 dev->flags |= ATA_DFLAG_LBA;
1257 if (ata_id_has_lba48(id)) {
1258 dev->flags |= ATA_DFLAG_LBA48;
1259 lba_desc = "LBA48";
1260 }
1261
1262 /* print device info to dmesg */
1263 if (print_info)
1264 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1265 "max %s, %Lu sectors: %s\n",
1266 ap->id, dev->devno,
1267 ata_id_major_version(id),
1268 ata_mode_string(xfer_mask),
1269 (unsigned long long)dev->n_sectors,
1270 lba_desc);
1271 } else {
1272 /* CHS */
1273
1274 /* Default translation */
1275 dev->cylinders = id[1];
1276 dev->heads = id[3];
1277 dev->sectors = id[6];
1278
1279 if (ata_id_current_chs_valid(id)) {
1280 /* Current CHS translation is valid. */
1281 dev->cylinders = id[54];
1282 dev->heads = id[55];
1283 dev->sectors = id[56];
1284 }
1285
1286 /* print device info to dmesg */
1287 if (print_info)
1288 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1289 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1290 ap->id, dev->devno,
1291 ata_id_major_version(id),
1292 ata_mode_string(xfer_mask),
1293 (unsigned long long)dev->n_sectors,
1294 dev->cylinders, dev->heads, dev->sectors);
1295 }
1296
1297 if (dev->id[59] & 0x100) {
1298 dev->multi_count = dev->id[59] & 0xff;
1299 DPRINTK("ata%u: dev %u multi count %u\n",
1300 ap->id, dev->devno, dev->multi_count);
1301 }
1302
1303 dev->cdb_len = 16;
1304 }
1305
1306 /* ATAPI-specific feature tests */
1307 else if (dev->class == ATA_DEV_ATAPI) {
1308 char *cdb_intr_string = "";
1309
1310 rc = atapi_cdb_len(id);
1311 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1312 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1313 rc = -EINVAL;
1314 goto err_out_nosup;
1315 }
1316 dev->cdb_len = (unsigned int) rc;
1317
1318 if (ata_id_cdb_intr(dev->id)) {
1319 dev->flags |= ATA_DFLAG_CDB_INTR;
1320 cdb_intr_string = ", CDB intr";
1321 }
1322
1323 /* print device info to dmesg */
1324 if (print_info)
1325 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n",
1326 ap->id, dev->devno, ata_mode_string(xfer_mask),
1327 cdb_intr_string);
1328 }
1329
1330 ap->host->max_cmd_len = 0;
1331 for (i = 0; i < ATA_MAX_DEVICES; i++)
1332 ap->host->max_cmd_len = max_t(unsigned int,
1333 ap->host->max_cmd_len,
1334 ap->device[i].cdb_len);
1335
1336 /* limit bridge transfers to udma5, 200 sectors */
1337 if (ata_dev_knobble(ap, dev)) {
1338 if (print_info)
1339 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1340 ap->id, dev->devno);
1341 dev->udma_mask &= ATA_UDMA5;
1342 dev->max_sectors = ATA_MAX_SECTORS;
1343 }
1344
1345 if (ap->ops->dev_config)
1346 ap->ops->dev_config(ap, dev);
1347
1348 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1349 return 0;
1350
1351 err_out_nosup:
1352 DPRINTK("EXIT, err\n");
1353 return rc;
1354 }
1355
1356 /**
1357 * ata_bus_probe - Reset and probe ATA bus
1358 * @ap: Bus to probe
1359 *
1360 * Master ATA bus probing function. Initiates a hardware-dependent
1361 * bus reset, then attempts to identify any devices found on
1362 * the bus.
1363 *
1364 * LOCKING:
1365 * PCI/etc. bus probe sem.
1366 *
1367 * RETURNS:
1368 * Zero on success, non-zero on error.
1369 */
1370
1371 static int ata_bus_probe(struct ata_port *ap)
1372 {
1373 unsigned int classes[ATA_MAX_DEVICES];
1374 unsigned int i, rc, found = 0;
1375
1376 ata_port_probe(ap);
1377
1378 /* reset and determine device classes */
1379 for (i = 0; i < ATA_MAX_DEVICES; i++)
1380 classes[i] = ATA_DEV_UNKNOWN;
1381
1382 if (ap->ops->probe_reset) {
1383 rc = ap->ops->probe_reset(ap, classes);
1384 if (rc) {
1385 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1386 return rc;
1387 }
1388 } else {
1389 ap->ops->phy_reset(ap);
1390
1391 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1392 for (i = 0; i < ATA_MAX_DEVICES; i++)
1393 classes[i] = ap->device[i].class;
1394
1395 ata_port_probe(ap);
1396 }
1397
1398 for (i = 0; i < ATA_MAX_DEVICES; i++)
1399 if (classes[i] == ATA_DEV_UNKNOWN)
1400 classes[i] = ATA_DEV_NONE;
1401
1402 /* read IDENTIFY page and configure devices */
1403 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1404 struct ata_device *dev = &ap->device[i];
1405
1406 dev->class = classes[i];
1407
1408 if (!ata_dev_present(dev))
1409 continue;
1410
1411 WARN_ON(dev->id != NULL);
1412 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1413 dev->class = ATA_DEV_NONE;
1414 continue;
1415 }
1416
1417 if (ata_dev_configure(ap, dev, 1)) {
1418 ata_dev_disable(ap, dev);
1419 continue;
1420 }
1421
1422 found = 1;
1423 }
1424
1425 if (!found)
1426 goto err_out_disable;
1427
1428 if (ap->ops->set_mode)
1429 ap->ops->set_mode(ap);
1430 else
1431 ata_set_mode(ap);
1432
1433 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1434 goto err_out_disable;
1435
1436 return 0;
1437
1438 err_out_disable:
1439 ap->ops->port_disable(ap);
1440 return -1;
1441 }
1442
1443 /**
1444 * ata_port_probe - Mark port as enabled
1445 * @ap: Port for which we indicate enablement
1446 *
1447 * Modify @ap data structure such that the system
1448 * thinks that the entire port is enabled.
1449 *
1450 * LOCKING: host_set lock, or some other form of
1451 * serialization.
1452 */
1453
1454 void ata_port_probe(struct ata_port *ap)
1455 {
1456 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1457 }
1458
1459 /**
1460 * sata_print_link_status - Print SATA link status
1461 * @ap: SATA port to printk link status about
1462 *
1463 * This function prints link speed and status of a SATA link.
1464 *
1465 * LOCKING:
1466 * None.
1467 */
1468 static void sata_print_link_status(struct ata_port *ap)
1469 {
1470 u32 sstatus, tmp;
1471 const char *speed;
1472
1473 if (!ap->ops->scr_read)
1474 return;
1475
1476 sstatus = scr_read(ap, SCR_STATUS);
1477
1478 if (sata_dev_present(ap)) {
1479 tmp = (sstatus >> 4) & 0xf;
1480 if (tmp & (1 << 0))
1481 speed = "1.5";
1482 else if (tmp & (1 << 1))
1483 speed = "3.0";
1484 else
1485 speed = "<unknown>";
1486 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1487 ap->id, speed, sstatus);
1488 } else {
1489 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1490 ap->id, sstatus);
1491 }
1492 }
1493
1494 /**
1495 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1496 * @ap: SATA port associated with target SATA PHY.
1497 *
1498 * This function issues commands to standard SATA Sxxx
1499 * PHY registers, to wake up the phy (and device), and
1500 * clear any reset condition.
1501 *
1502 * LOCKING:
1503 * PCI/etc. bus probe sem.
1504 *
1505 */
1506 void __sata_phy_reset(struct ata_port *ap)
1507 {
1508 u32 sstatus;
1509 unsigned long timeout = jiffies + (HZ * 5);
1510
1511 if (ap->flags & ATA_FLAG_SATA_RESET) {
1512 /* issue phy wake/reset */
1513 scr_write_flush(ap, SCR_CONTROL, 0x301);
1514 /* Couldn't find anything in SATA I/II specs, but
1515 * AHCI-1.1 10.4.2 says at least 1 ms. */
1516 mdelay(1);
1517 }
1518 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1519
1520 /* wait for phy to become ready, if necessary */
1521 do {
1522 msleep(200);
1523 sstatus = scr_read(ap, SCR_STATUS);
1524 if ((sstatus & 0xf) != 1)
1525 break;
1526 } while (time_before(jiffies, timeout));
1527
1528 /* print link status */
1529 sata_print_link_status(ap);
1530
1531 /* TODO: phy layer with polling, timeouts, etc. */
1532 if (sata_dev_present(ap))
1533 ata_port_probe(ap);
1534 else
1535 ata_port_disable(ap);
1536
1537 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1538 return;
1539
1540 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1541 ata_port_disable(ap);
1542 return;
1543 }
1544
1545 ap->cbl = ATA_CBL_SATA;
1546 }
1547
1548 /**
1549 * sata_phy_reset - Reset SATA bus.
1550 * @ap: SATA port associated with target SATA PHY.
1551 *
1552 * This function resets the SATA bus, and then probes
1553 * the bus for devices.
1554 *
1555 * LOCKING:
1556 * PCI/etc. bus probe sem.
1557 *
1558 */
1559 void sata_phy_reset(struct ata_port *ap)
1560 {
1561 __sata_phy_reset(ap);
1562 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1563 return;
1564 ata_bus_reset(ap);
1565 }
1566
1567 /**
1568 * ata_dev_pair - return other device on cable
1569 * @ap: port
1570 * @adev: device
1571 *
1572 * Obtain the other device on the same cable, or if none is
1573 * present NULL is returned
1574 */
1575
1576 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1577 {
1578 struct ata_device *pair = &ap->device[1 - adev->devno];
1579 if (!ata_dev_present(pair))
1580 return NULL;
1581 return pair;
1582 }
1583
1584 /**
1585 * ata_port_disable - Disable port.
1586 * @ap: Port to be disabled.
1587 *
1588 * Modify @ap data structure such that the system
1589 * thinks that the entire port is disabled, and should
1590 * never attempt to probe or communicate with devices
1591 * on this port.
1592 *
1593 * LOCKING: host_set lock, or some other form of
1594 * serialization.
1595 */
1596
1597 void ata_port_disable(struct ata_port *ap)
1598 {
1599 ap->device[0].class = ATA_DEV_NONE;
1600 ap->device[1].class = ATA_DEV_NONE;
1601 ap->flags |= ATA_FLAG_PORT_DISABLED;
1602 }
1603
1604 /*
1605 * This mode timing computation functionality is ported over from
1606 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1607 */
1608 /*
1609 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1610 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1611 * for PIO 5, which is a nonstandard extension and UDMA6, which
1612 * is currently supported only by Maxtor drives.
1613 */
1614
1615 static const struct ata_timing ata_timing[] = {
1616
1617 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1618 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1619 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1620 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1621
1622 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1623 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1624 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1625
1626 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1627
1628 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1629 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1630 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1631
1632 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1633 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1634 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1635
1636 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1637 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1638 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1639
1640 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1641 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1642 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1643
1644 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1645
1646 { 0xFF }
1647 };
1648
1649 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1650 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1651
1652 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1653 {
1654 q->setup = EZ(t->setup * 1000, T);
1655 q->act8b = EZ(t->act8b * 1000, T);
1656 q->rec8b = EZ(t->rec8b * 1000, T);
1657 q->cyc8b = EZ(t->cyc8b * 1000, T);
1658 q->active = EZ(t->active * 1000, T);
1659 q->recover = EZ(t->recover * 1000, T);
1660 q->cycle = EZ(t->cycle * 1000, T);
1661 q->udma = EZ(t->udma * 1000, UT);
1662 }
1663
1664 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1665 struct ata_timing *m, unsigned int what)
1666 {
1667 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1668 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1669 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1670 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1671 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1672 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1673 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1674 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1675 }
1676
1677 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1678 {
1679 const struct ata_timing *t;
1680
1681 for (t = ata_timing; t->mode != speed; t++)
1682 if (t->mode == 0xFF)
1683 return NULL;
1684 return t;
1685 }
1686
1687 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1688 struct ata_timing *t, int T, int UT)
1689 {
1690 const struct ata_timing *s;
1691 struct ata_timing p;
1692
1693 /*
1694 * Find the mode.
1695 */
1696
1697 if (!(s = ata_timing_find_mode(speed)))
1698 return -EINVAL;
1699
1700 memcpy(t, s, sizeof(*s));
1701
1702 /*
1703 * If the drive is an EIDE drive, it can tell us it needs extended
1704 * PIO/MW_DMA cycle timing.
1705 */
1706
1707 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1708 memset(&p, 0, sizeof(p));
1709 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1710 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1711 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1712 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1713 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1714 }
1715 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1716 }
1717
1718 /*
1719 * Convert the timing to bus clock counts.
1720 */
1721
1722 ata_timing_quantize(t, t, T, UT);
1723
1724 /*
1725 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1726 * S.M.A.R.T * and some other commands. We have to ensure that the
1727 * DMA cycle timing is slower/equal than the fastest PIO timing.
1728 */
1729
1730 if (speed > XFER_PIO_4) {
1731 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1732 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1733 }
1734
1735 /*
1736 * Lengthen active & recovery time so that cycle time is correct.
1737 */
1738
1739 if (t->act8b + t->rec8b < t->cyc8b) {
1740 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1741 t->rec8b = t->cyc8b - t->act8b;
1742 }
1743
1744 if (t->active + t->recover < t->cycle) {
1745 t->active += (t->cycle - (t->active + t->recover)) / 2;
1746 t->recover = t->cycle - t->active;
1747 }
1748
1749 return 0;
1750 }
1751
1752 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1753 {
1754 unsigned int err_mask;
1755 int rc;
1756
1757 if (dev->xfer_shift == ATA_SHIFT_PIO)
1758 dev->flags |= ATA_DFLAG_PIO;
1759
1760 err_mask = ata_dev_set_xfermode(ap, dev);
1761 if (err_mask) {
1762 printk(KERN_ERR
1763 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1764 ap->id, err_mask);
1765 return -EIO;
1766 }
1767
1768 rc = ata_dev_revalidate(ap, dev, 0);
1769 if (rc) {
1770 printk(KERN_ERR
1771 "ata%u: failed to revalidate after set xfermode\n",
1772 ap->id);
1773 return rc;
1774 }
1775
1776 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1777 dev->xfer_shift, (int)dev->xfer_mode);
1778
1779 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1780 ap->id, dev->devno,
1781 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1782 return 0;
1783 }
1784
1785 static int ata_host_set_pio(struct ata_port *ap)
1786 {
1787 int i;
1788
1789 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1790 struct ata_device *dev = &ap->device[i];
1791
1792 if (!ata_dev_present(dev))
1793 continue;
1794
1795 if (!dev->pio_mode) {
1796 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1797 return -1;
1798 }
1799
1800 dev->xfer_mode = dev->pio_mode;
1801 dev->xfer_shift = ATA_SHIFT_PIO;
1802 if (ap->ops->set_piomode)
1803 ap->ops->set_piomode(ap, dev);
1804 }
1805
1806 return 0;
1807 }
1808
1809 static void ata_host_set_dma(struct ata_port *ap)
1810 {
1811 int i;
1812
1813 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1814 struct ata_device *dev = &ap->device[i];
1815
1816 if (!ata_dev_present(dev) || !dev->dma_mode)
1817 continue;
1818
1819 dev->xfer_mode = dev->dma_mode;
1820 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1821 if (ap->ops->set_dmamode)
1822 ap->ops->set_dmamode(ap, dev);
1823 }
1824 }
1825
1826 /**
1827 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1828 * @ap: port on which timings will be programmed
1829 *
1830 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1831 *
1832 * LOCKING:
1833 * PCI/etc. bus probe sem.
1834 */
1835 static void ata_set_mode(struct ata_port *ap)
1836 {
1837 int i, rc, used_dma = 0;
1838
1839 /* step 1: calculate xfer_mask */
1840 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1841 struct ata_device *dev = &ap->device[i];
1842 unsigned int pio_mask, dma_mask;
1843
1844 if (!ata_dev_present(dev))
1845 continue;
1846
1847 ata_dev_xfermask(ap, dev);
1848
1849 /* TODO: let LLDD filter dev->*_mask here */
1850
1851 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1852 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1853 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1854 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1855
1856 if (dev->dma_mode)
1857 used_dma = 1;
1858 }
1859
1860 /* step 2: always set host PIO timings */
1861 rc = ata_host_set_pio(ap);
1862 if (rc)
1863 goto err_out;
1864
1865 /* step 3: set host DMA timings */
1866 ata_host_set_dma(ap);
1867
1868 /* step 4: update devices' xfer mode */
1869 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1870 struct ata_device *dev = &ap->device[i];
1871
1872 if (!ata_dev_present(dev))
1873 continue;
1874
1875 if (ata_dev_set_mode(ap, dev))
1876 goto err_out;
1877 }
1878
1879 /*
1880 * Record simplex status. If we selected DMA then the other
1881 * host channels are not permitted to do so.
1882 */
1883
1884 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
1885 ap->host_set->simplex_claimed = 1;
1886
1887 /*
1888 * Chip specific finalisation
1889 */
1890 if (ap->ops->post_set_mode)
1891 ap->ops->post_set_mode(ap);
1892
1893 return;
1894
1895 err_out:
1896 ata_port_disable(ap);
1897 }
1898
1899 /**
1900 * ata_tf_to_host - issue ATA taskfile to host controller
1901 * @ap: port to which command is being issued
1902 * @tf: ATA taskfile register set
1903 *
1904 * Issues ATA taskfile register set to ATA host controller,
1905 * with proper synchronization with interrupt handler and
1906 * other threads.
1907 *
1908 * LOCKING:
1909 * spin_lock_irqsave(host_set lock)
1910 */
1911
1912 static inline void ata_tf_to_host(struct ata_port *ap,
1913 const struct ata_taskfile *tf)
1914 {
1915 ap->ops->tf_load(ap, tf);
1916 ap->ops->exec_command(ap, tf);
1917 }
1918
1919 /**
1920 * ata_busy_sleep - sleep until BSY clears, or timeout
1921 * @ap: port containing status register to be polled
1922 * @tmout_pat: impatience timeout
1923 * @tmout: overall timeout
1924 *
1925 * Sleep until ATA Status register bit BSY clears,
1926 * or a timeout occurs.
1927 *
1928 * LOCKING: None.
1929 */
1930
1931 unsigned int ata_busy_sleep (struct ata_port *ap,
1932 unsigned long tmout_pat, unsigned long tmout)
1933 {
1934 unsigned long timer_start, timeout;
1935 u8 status;
1936
1937 status = ata_busy_wait(ap, ATA_BUSY, 300);
1938 timer_start = jiffies;
1939 timeout = timer_start + tmout_pat;
1940 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1941 msleep(50);
1942 status = ata_busy_wait(ap, ATA_BUSY, 3);
1943 }
1944
1945 if (status & ATA_BUSY)
1946 printk(KERN_WARNING "ata%u is slow to respond, "
1947 "please be patient\n", ap->id);
1948
1949 timeout = timer_start + tmout;
1950 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1951 msleep(50);
1952 status = ata_chk_status(ap);
1953 }
1954
1955 if (status & ATA_BUSY) {
1956 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1957 ap->id, tmout / HZ);
1958 return 1;
1959 }
1960
1961 return 0;
1962 }
1963
1964 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1965 {
1966 struct ata_ioports *ioaddr = &ap->ioaddr;
1967 unsigned int dev0 = devmask & (1 << 0);
1968 unsigned int dev1 = devmask & (1 << 1);
1969 unsigned long timeout;
1970
1971 /* if device 0 was found in ata_devchk, wait for its
1972 * BSY bit to clear
1973 */
1974 if (dev0)
1975 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1976
1977 /* if device 1 was found in ata_devchk, wait for
1978 * register access, then wait for BSY to clear
1979 */
1980 timeout = jiffies + ATA_TMOUT_BOOT;
1981 while (dev1) {
1982 u8 nsect, lbal;
1983
1984 ap->ops->dev_select(ap, 1);
1985 if (ap->flags & ATA_FLAG_MMIO) {
1986 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1987 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1988 } else {
1989 nsect = inb(ioaddr->nsect_addr);
1990 lbal = inb(ioaddr->lbal_addr);
1991 }
1992 if ((nsect == 1) && (lbal == 1))
1993 break;
1994 if (time_after(jiffies, timeout)) {
1995 dev1 = 0;
1996 break;
1997 }
1998 msleep(50); /* give drive a breather */
1999 }
2000 if (dev1)
2001 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2002
2003 /* is all this really necessary? */
2004 ap->ops->dev_select(ap, 0);
2005 if (dev1)
2006 ap->ops->dev_select(ap, 1);
2007 if (dev0)
2008 ap->ops->dev_select(ap, 0);
2009 }
2010
2011 static unsigned int ata_bus_softreset(struct ata_port *ap,
2012 unsigned int devmask)
2013 {
2014 struct ata_ioports *ioaddr = &ap->ioaddr;
2015
2016 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2017
2018 /* software reset. causes dev0 to be selected */
2019 if (ap->flags & ATA_FLAG_MMIO) {
2020 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2021 udelay(20); /* FIXME: flush */
2022 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2023 udelay(20); /* FIXME: flush */
2024 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2025 } else {
2026 outb(ap->ctl, ioaddr->ctl_addr);
2027 udelay(10);
2028 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2029 udelay(10);
2030 outb(ap->ctl, ioaddr->ctl_addr);
2031 }
2032
2033 /* spec mandates ">= 2ms" before checking status.
2034 * We wait 150ms, because that was the magic delay used for
2035 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2036 * between when the ATA command register is written, and then
2037 * status is checked. Because waiting for "a while" before
2038 * checking status is fine, post SRST, we perform this magic
2039 * delay here as well.
2040 *
2041 * Old drivers/ide uses the 2mS rule and then waits for ready
2042 */
2043 msleep(150);
2044
2045 /* Before we perform post reset processing we want to see if
2046 * the bus shows 0xFF because the odd clown forgets the D7
2047 * pulldown resistor.
2048 */
2049 if (ata_check_status(ap) == 0xFF)
2050 return AC_ERR_OTHER;
2051
2052 ata_bus_post_reset(ap, devmask);
2053
2054 return 0;
2055 }
2056
2057 /**
2058 * ata_bus_reset - reset host port and associated ATA channel
2059 * @ap: port to reset
2060 *
2061 * This is typically the first time we actually start issuing
2062 * commands to the ATA channel. We wait for BSY to clear, then
2063 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2064 * result. Determine what devices, if any, are on the channel
2065 * by looking at the device 0/1 error register. Look at the signature
2066 * stored in each device's taskfile registers, to determine if
2067 * the device is ATA or ATAPI.
2068 *
2069 * LOCKING:
2070 * PCI/etc. bus probe sem.
2071 * Obtains host_set lock.
2072 *
2073 * SIDE EFFECTS:
2074 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2075 */
2076
2077 void ata_bus_reset(struct ata_port *ap)
2078 {
2079 struct ata_ioports *ioaddr = &ap->ioaddr;
2080 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2081 u8 err;
2082 unsigned int dev0, dev1 = 0, devmask = 0;
2083
2084 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2085
2086 /* determine if device 0/1 are present */
2087 if (ap->flags & ATA_FLAG_SATA_RESET)
2088 dev0 = 1;
2089 else {
2090 dev0 = ata_devchk(ap, 0);
2091 if (slave_possible)
2092 dev1 = ata_devchk(ap, 1);
2093 }
2094
2095 if (dev0)
2096 devmask |= (1 << 0);
2097 if (dev1)
2098 devmask |= (1 << 1);
2099
2100 /* select device 0 again */
2101 ap->ops->dev_select(ap, 0);
2102
2103 /* issue bus reset */
2104 if (ap->flags & ATA_FLAG_SRST)
2105 if (ata_bus_softreset(ap, devmask))
2106 goto err_out;
2107
2108 /*
2109 * determine by signature whether we have ATA or ATAPI devices
2110 */
2111 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2112 if ((slave_possible) && (err != 0x81))
2113 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2114
2115 /* re-enable interrupts */
2116 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2117 ata_irq_on(ap);
2118
2119 /* is double-select really necessary? */
2120 if (ap->device[1].class != ATA_DEV_NONE)
2121 ap->ops->dev_select(ap, 1);
2122 if (ap->device[0].class != ATA_DEV_NONE)
2123 ap->ops->dev_select(ap, 0);
2124
2125 /* if no devices were detected, disable this port */
2126 if ((ap->device[0].class == ATA_DEV_NONE) &&
2127 (ap->device[1].class == ATA_DEV_NONE))
2128 goto err_out;
2129
2130 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2131 /* set up device control for ATA_FLAG_SATA_RESET */
2132 if (ap->flags & ATA_FLAG_MMIO)
2133 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2134 else
2135 outb(ap->ctl, ioaddr->ctl_addr);
2136 }
2137
2138 DPRINTK("EXIT\n");
2139 return;
2140
2141 err_out:
2142 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2143 ap->ops->port_disable(ap);
2144
2145 DPRINTK("EXIT\n");
2146 }
2147
2148 static int sata_phy_resume(struct ata_port *ap)
2149 {
2150 unsigned long timeout = jiffies + (HZ * 5);
2151 u32 sstatus;
2152
2153 scr_write_flush(ap, SCR_CONTROL, 0x300);
2154
2155 /* Wait for phy to become ready, if necessary. */
2156 do {
2157 msleep(200);
2158 sstatus = scr_read(ap, SCR_STATUS);
2159 if ((sstatus & 0xf) != 1)
2160 return 0;
2161 } while (time_before(jiffies, timeout));
2162
2163 return -1;
2164 }
2165
2166 /**
2167 * ata_std_probeinit - initialize probing
2168 * @ap: port to be probed
2169 *
2170 * @ap is about to be probed. Initialize it. This function is
2171 * to be used as standard callback for ata_drive_probe_reset().
2172 *
2173 * NOTE!!! Do not use this function as probeinit if a low level
2174 * driver implements only hardreset. Just pass NULL as probeinit
2175 * in that case. Using this function is probably okay but doing
2176 * so makes reset sequence different from the original
2177 * ->phy_reset implementation and Jeff nervous. :-P
2178 */
2179 void ata_std_probeinit(struct ata_port *ap)
2180 {
2181 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2182 sata_phy_resume(ap);
2183 if (sata_dev_present(ap))
2184 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2185 }
2186 }
2187
2188 /**
2189 * ata_std_softreset - reset host port via ATA SRST
2190 * @ap: port to reset
2191 * @verbose: fail verbosely
2192 * @classes: resulting classes of attached devices
2193 *
2194 * Reset host port using ATA SRST. This function is to be used
2195 * as standard callback for ata_drive_*_reset() functions.
2196 *
2197 * LOCKING:
2198 * Kernel thread context (may sleep)
2199 *
2200 * RETURNS:
2201 * 0 on success, -errno otherwise.
2202 */
2203 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2204 {
2205 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2206 unsigned int devmask = 0, err_mask;
2207 u8 err;
2208
2209 DPRINTK("ENTER\n");
2210
2211 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2212 classes[0] = ATA_DEV_NONE;
2213 goto out;
2214 }
2215
2216 /* determine if device 0/1 are present */
2217 if (ata_devchk(ap, 0))
2218 devmask |= (1 << 0);
2219 if (slave_possible && ata_devchk(ap, 1))
2220 devmask |= (1 << 1);
2221
2222 /* select device 0 again */
2223 ap->ops->dev_select(ap, 0);
2224
2225 /* issue bus reset */
2226 DPRINTK("about to softreset, devmask=%x\n", devmask);
2227 err_mask = ata_bus_softreset(ap, devmask);
2228 if (err_mask) {
2229 if (verbose)
2230 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2231 ap->id, err_mask);
2232 else
2233 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2234 err_mask);
2235 return -EIO;
2236 }
2237
2238 /* determine by signature whether we have ATA or ATAPI devices */
2239 classes[0] = ata_dev_try_classify(ap, 0, &err);
2240 if (slave_possible && err != 0x81)
2241 classes[1] = ata_dev_try_classify(ap, 1, &err);
2242
2243 out:
2244 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2245 return 0;
2246 }
2247
2248 /**
2249 * sata_std_hardreset - reset host port via SATA phy reset
2250 * @ap: port to reset
2251 * @verbose: fail verbosely
2252 * @class: resulting class of attached device
2253 *
2254 * SATA phy-reset host port using DET bits of SControl register.
2255 * This function is to be used as standard callback for
2256 * ata_drive_*_reset().
2257 *
2258 * LOCKING:
2259 * Kernel thread context (may sleep)
2260 *
2261 * RETURNS:
2262 * 0 on success, -errno otherwise.
2263 */
2264 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2265 {
2266 DPRINTK("ENTER\n");
2267
2268 /* Issue phy wake/reset */
2269 scr_write_flush(ap, SCR_CONTROL, 0x301);
2270
2271 /*
2272 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2273 * 10.4.2 says at least 1 ms.
2274 */
2275 msleep(1);
2276
2277 /* Bring phy back */
2278 sata_phy_resume(ap);
2279
2280 /* TODO: phy layer with polling, timeouts, etc. */
2281 if (!sata_dev_present(ap)) {
2282 *class = ATA_DEV_NONE;
2283 DPRINTK("EXIT, link offline\n");
2284 return 0;
2285 }
2286
2287 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2288 if (verbose)
2289 printk(KERN_ERR "ata%u: COMRESET failed "
2290 "(device not ready)\n", ap->id);
2291 else
2292 DPRINTK("EXIT, device not ready\n");
2293 return -EIO;
2294 }
2295
2296 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2297
2298 *class = ata_dev_try_classify(ap, 0, NULL);
2299
2300 DPRINTK("EXIT, class=%u\n", *class);
2301 return 0;
2302 }
2303
2304 /**
2305 * ata_std_postreset - standard postreset callback
2306 * @ap: the target ata_port
2307 * @classes: classes of attached devices
2308 *
2309 * This function is invoked after a successful reset. Note that
2310 * the device might have been reset more than once using
2311 * different reset methods before postreset is invoked.
2312 *
2313 * This function is to be used as standard callback for
2314 * ata_drive_*_reset().
2315 *
2316 * LOCKING:
2317 * Kernel thread context (may sleep)
2318 */
2319 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2320 {
2321 DPRINTK("ENTER\n");
2322
2323 /* set cable type if it isn't already set */
2324 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2325 ap->cbl = ATA_CBL_SATA;
2326
2327 /* print link status */
2328 if (ap->cbl == ATA_CBL_SATA)
2329 sata_print_link_status(ap);
2330
2331 /* re-enable interrupts */
2332 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2333 ata_irq_on(ap);
2334
2335 /* is double-select really necessary? */
2336 if (classes[0] != ATA_DEV_NONE)
2337 ap->ops->dev_select(ap, 1);
2338 if (classes[1] != ATA_DEV_NONE)
2339 ap->ops->dev_select(ap, 0);
2340
2341 /* bail out if no device is present */
2342 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2343 DPRINTK("EXIT, no device\n");
2344 return;
2345 }
2346
2347 /* set up device control */
2348 if (ap->ioaddr.ctl_addr) {
2349 if (ap->flags & ATA_FLAG_MMIO)
2350 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2351 else
2352 outb(ap->ctl, ap->ioaddr.ctl_addr);
2353 }
2354
2355 DPRINTK("EXIT\n");
2356 }
2357
2358 /**
2359 * ata_std_probe_reset - standard probe reset method
2360 * @ap: prot to perform probe-reset
2361 * @classes: resulting classes of attached devices
2362 *
2363 * The stock off-the-shelf ->probe_reset method.
2364 *
2365 * LOCKING:
2366 * Kernel thread context (may sleep)
2367 *
2368 * RETURNS:
2369 * 0 on success, -errno otherwise.
2370 */
2371 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2372 {
2373 ata_reset_fn_t hardreset;
2374
2375 hardreset = NULL;
2376 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2377 hardreset = sata_std_hardreset;
2378
2379 return ata_drive_probe_reset(ap, ata_std_probeinit,
2380 ata_std_softreset, hardreset,
2381 ata_std_postreset, classes);
2382 }
2383
2384 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2385 ata_postreset_fn_t postreset,
2386 unsigned int *classes)
2387 {
2388 int i, rc;
2389
2390 for (i = 0; i < ATA_MAX_DEVICES; i++)
2391 classes[i] = ATA_DEV_UNKNOWN;
2392
2393 rc = reset(ap, 0, classes);
2394 if (rc)
2395 return rc;
2396
2397 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2398 * is complete and convert all ATA_DEV_UNKNOWN to
2399 * ATA_DEV_NONE.
2400 */
2401 for (i = 0; i < ATA_MAX_DEVICES; i++)
2402 if (classes[i] != ATA_DEV_UNKNOWN)
2403 break;
2404
2405 if (i < ATA_MAX_DEVICES)
2406 for (i = 0; i < ATA_MAX_DEVICES; i++)
2407 if (classes[i] == ATA_DEV_UNKNOWN)
2408 classes[i] = ATA_DEV_NONE;
2409
2410 if (postreset)
2411 postreset(ap, classes);
2412
2413 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2414 }
2415
2416 /**
2417 * ata_drive_probe_reset - Perform probe reset with given methods
2418 * @ap: port to reset
2419 * @probeinit: probeinit method (can be NULL)
2420 * @softreset: softreset method (can be NULL)
2421 * @hardreset: hardreset method (can be NULL)
2422 * @postreset: postreset method (can be NULL)
2423 * @classes: resulting classes of attached devices
2424 *
2425 * Reset the specified port and classify attached devices using
2426 * given methods. This function prefers softreset but tries all
2427 * possible reset sequences to reset and classify devices. This
2428 * function is intended to be used for constructing ->probe_reset
2429 * callback by low level drivers.
2430 *
2431 * Reset methods should follow the following rules.
2432 *
2433 * - Return 0 on sucess, -errno on failure.
2434 * - If classification is supported, fill classes[] with
2435 * recognized class codes.
2436 * - If classification is not supported, leave classes[] alone.
2437 * - If verbose is non-zero, print error message on failure;
2438 * otherwise, shut up.
2439 *
2440 * LOCKING:
2441 * Kernel thread context (may sleep)
2442 *
2443 * RETURNS:
2444 * 0 on success, -EINVAL if no reset method is avaliable, -ENODEV
2445 * if classification fails, and any error code from reset
2446 * methods.
2447 */
2448 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2449 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2450 ata_postreset_fn_t postreset, unsigned int *classes)
2451 {
2452 int rc = -EINVAL;
2453
2454 if (probeinit)
2455 probeinit(ap);
2456
2457 if (softreset) {
2458 rc = do_probe_reset(ap, softreset, postreset, classes);
2459 if (rc == 0)
2460 return 0;
2461 }
2462
2463 if (!hardreset)
2464 return rc;
2465
2466 rc = do_probe_reset(ap, hardreset, postreset, classes);
2467 if (rc == 0 || rc != -ENODEV)
2468 return rc;
2469
2470 if (softreset)
2471 rc = do_probe_reset(ap, softreset, postreset, classes);
2472
2473 return rc;
2474 }
2475
2476 /**
2477 * ata_dev_same_device - Determine whether new ID matches configured device
2478 * @ap: port on which the device to compare against resides
2479 * @dev: device to compare against
2480 * @new_class: class of the new device
2481 * @new_id: IDENTIFY page of the new device
2482 *
2483 * Compare @new_class and @new_id against @dev and determine
2484 * whether @dev is the device indicated by @new_class and
2485 * @new_id.
2486 *
2487 * LOCKING:
2488 * None.
2489 *
2490 * RETURNS:
2491 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2492 */
2493 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2494 unsigned int new_class, const u16 *new_id)
2495 {
2496 const u16 *old_id = dev->id;
2497 unsigned char model[2][41], serial[2][21];
2498 u64 new_n_sectors;
2499
2500 if (dev->class != new_class) {
2501 printk(KERN_INFO
2502 "ata%u: dev %u class mismatch %d != %d\n",
2503 ap->id, dev->devno, dev->class, new_class);
2504 return 0;
2505 }
2506
2507 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2508 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2509 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2510 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2511 new_n_sectors = ata_id_n_sectors(new_id);
2512
2513 if (strcmp(model[0], model[1])) {
2514 printk(KERN_INFO
2515 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2516 ap->id, dev->devno, model[0], model[1]);
2517 return 0;
2518 }
2519
2520 if (strcmp(serial[0], serial[1])) {
2521 printk(KERN_INFO
2522 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2523 ap->id, dev->devno, serial[0], serial[1]);
2524 return 0;
2525 }
2526
2527 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2528 printk(KERN_INFO
2529 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2530 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2531 (unsigned long long)new_n_sectors);
2532 return 0;
2533 }
2534
2535 return 1;
2536 }
2537
2538 /**
2539 * ata_dev_revalidate - Revalidate ATA device
2540 * @ap: port on which the device to revalidate resides
2541 * @dev: device to revalidate
2542 * @post_reset: is this revalidation after reset?
2543 *
2544 * Re-read IDENTIFY page and make sure @dev is still attached to
2545 * the port.
2546 *
2547 * LOCKING:
2548 * Kernel thread context (may sleep)
2549 *
2550 * RETURNS:
2551 * 0 on success, negative errno otherwise
2552 */
2553 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2554 int post_reset)
2555 {
2556 unsigned int class;
2557 u16 *id;
2558 int rc;
2559
2560 if (!ata_dev_present(dev))
2561 return -ENODEV;
2562
2563 class = dev->class;
2564 id = NULL;
2565
2566 /* allocate & read ID data */
2567 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2568 if (rc)
2569 goto fail;
2570
2571 /* is the device still there? */
2572 if (!ata_dev_same_device(ap, dev, class, id)) {
2573 rc = -ENODEV;
2574 goto fail;
2575 }
2576
2577 kfree(dev->id);
2578 dev->id = id;
2579
2580 /* configure device according to the new ID */
2581 return ata_dev_configure(ap, dev, 0);
2582
2583 fail:
2584 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2585 ap->id, dev->devno, rc);
2586 kfree(id);
2587 return rc;
2588 }
2589
2590 static const char * const ata_dma_blacklist [] = {
2591 "WDC AC11000H", NULL,
2592 "WDC AC22100H", NULL,
2593 "WDC AC32500H", NULL,
2594 "WDC AC33100H", NULL,
2595 "WDC AC31600H", NULL,
2596 "WDC AC32100H", "24.09P07",
2597 "WDC AC23200L", "21.10N21",
2598 "Compaq CRD-8241B", NULL,
2599 "CRD-8400B", NULL,
2600 "CRD-8480B", NULL,
2601 "CRD-8482B", NULL,
2602 "CRD-84", NULL,
2603 "SanDisk SDP3B", NULL,
2604 "SanDisk SDP3B-64", NULL,
2605 "SANYO CD-ROM CRD", NULL,
2606 "HITACHI CDR-8", NULL,
2607 "HITACHI CDR-8335", NULL,
2608 "HITACHI CDR-8435", NULL,
2609 "Toshiba CD-ROM XM-6202B", NULL,
2610 "TOSHIBA CD-ROM XM-1702BC", NULL,
2611 "CD-532E-A", NULL,
2612 "E-IDE CD-ROM CR-840", NULL,
2613 "CD-ROM Drive/F5A", NULL,
2614 "WPI CDD-820", NULL,
2615 "SAMSUNG CD-ROM SC-148C", NULL,
2616 "SAMSUNG CD-ROM SC", NULL,
2617 "SanDisk SDP3B-64", NULL,
2618 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2619 "_NEC DV5800A", NULL,
2620 "SAMSUNG CD-ROM SN-124", "N001"
2621 };
2622
2623 static int ata_strim(char *s, size_t len)
2624 {
2625 len = strnlen(s, len);
2626
2627 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2628 while ((len > 0) && (s[len - 1] == ' ')) {
2629 len--;
2630 s[len] = 0;
2631 }
2632 return len;
2633 }
2634
2635 static int ata_dma_blacklisted(const struct ata_device *dev)
2636 {
2637 unsigned char model_num[40];
2638 unsigned char model_rev[16];
2639 unsigned int nlen, rlen;
2640 int i;
2641
2642 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2643 sizeof(model_num));
2644 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2645 sizeof(model_rev));
2646 nlen = ata_strim(model_num, sizeof(model_num));
2647 rlen = ata_strim(model_rev, sizeof(model_rev));
2648
2649 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2650 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2651 if (ata_dma_blacklist[i+1] == NULL)
2652 return 1;
2653 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2654 return 1;
2655 }
2656 }
2657 return 0;
2658 }
2659
2660 /**
2661 * ata_dev_xfermask - Compute supported xfermask of the given device
2662 * @ap: Port on which the device to compute xfermask for resides
2663 * @dev: Device to compute xfermask for
2664 *
2665 * Compute supported xfermask of @dev and store it in
2666 * dev->*_mask. This function is responsible for applying all
2667 * known limits including host controller limits, device
2668 * blacklist, etc...
2669 *
2670 * FIXME: The current implementation limits all transfer modes to
2671 * the fastest of the lowested device on the port. This is not
2672 * required on most controllers.
2673 *
2674 * LOCKING:
2675 * None.
2676 */
2677 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2678 {
2679 struct ata_host_set *hs = ap->host_set;
2680 unsigned long xfer_mask;
2681 int i;
2682
2683 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2684 ap->udma_mask);
2685
2686 /* FIXME: Use port-wide xfermask for now */
2687 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2688 struct ata_device *d = &ap->device[i];
2689 if (!ata_dev_present(d))
2690 continue;
2691 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2692 d->udma_mask);
2693 xfer_mask &= ata_id_xfermask(d->id);
2694 if (ata_dma_blacklisted(d))
2695 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2696 /* Apply cable rule here. Don't apply it early because when
2697 we handle hot plug the cable type can itself change */
2698 if (ap->cbl == ATA_CBL_PATA40)
2699 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2700 }
2701
2702 if (ata_dma_blacklisted(dev))
2703 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2704 "disabling DMA\n", ap->id, dev->devno);
2705
2706 if (hs->flags & ATA_HOST_SIMPLEX) {
2707 if (hs->simplex_claimed)
2708 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2709 }
2710 if (ap->ops->mode_filter)
2711 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2712
2713 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2714 &dev->udma_mask);
2715 }
2716
2717 /**
2718 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2719 * @ap: Port associated with device @dev
2720 * @dev: Device to which command will be sent
2721 *
2722 * Issue SET FEATURES - XFER MODE command to device @dev
2723 * on port @ap.
2724 *
2725 * LOCKING:
2726 * PCI/etc. bus probe sem.
2727 *
2728 * RETURNS:
2729 * 0 on success, AC_ERR_* mask otherwise.
2730 */
2731
2732 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2733 struct ata_device *dev)
2734 {
2735 struct ata_taskfile tf;
2736 unsigned int err_mask;
2737
2738 /* set up set-features taskfile */
2739 DPRINTK("set features - xfer mode\n");
2740
2741 ata_tf_init(ap, &tf, dev->devno);
2742 tf.command = ATA_CMD_SET_FEATURES;
2743 tf.feature = SETFEATURES_XFER;
2744 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2745 tf.protocol = ATA_PROT_NODATA;
2746 tf.nsect = dev->xfer_mode;
2747
2748 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2749
2750 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2751 return err_mask;
2752 }
2753
2754 /**
2755 * ata_dev_init_params - Issue INIT DEV PARAMS command
2756 * @ap: Port associated with device @dev
2757 * @dev: Device to which command will be sent
2758 *
2759 * LOCKING:
2760 * Kernel thread context (may sleep)
2761 *
2762 * RETURNS:
2763 * 0 on success, AC_ERR_* mask otherwise.
2764 */
2765
2766 static unsigned int ata_dev_init_params(struct ata_port *ap,
2767 struct ata_device *dev,
2768 u16 heads,
2769 u16 sectors)
2770 {
2771 struct ata_taskfile tf;
2772 unsigned int err_mask;
2773
2774 /* Number of sectors per track 1-255. Number of heads 1-16 */
2775 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2776 return AC_ERR_INVALID;
2777
2778 /* set up init dev params taskfile */
2779 DPRINTK("init dev params \n");
2780
2781 ata_tf_init(ap, &tf, dev->devno);
2782 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2783 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2784 tf.protocol = ATA_PROT_NODATA;
2785 tf.nsect = sectors;
2786 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2787
2788 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2789
2790 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2791 return err_mask;
2792 }
2793
2794 /**
2795 * ata_sg_clean - Unmap DMA memory associated with command
2796 * @qc: Command containing DMA memory to be released
2797 *
2798 * Unmap all mapped DMA memory associated with this command.
2799 *
2800 * LOCKING:
2801 * spin_lock_irqsave(host_set lock)
2802 */
2803
2804 static void ata_sg_clean(struct ata_queued_cmd *qc)
2805 {
2806 struct ata_port *ap = qc->ap;
2807 struct scatterlist *sg = qc->__sg;
2808 int dir = qc->dma_dir;
2809 void *pad_buf = NULL;
2810
2811 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2812 WARN_ON(sg == NULL);
2813
2814 if (qc->flags & ATA_QCFLAG_SINGLE)
2815 WARN_ON(qc->n_elem > 1);
2816
2817 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2818
2819 /* if we padded the buffer out to 32-bit bound, and data
2820 * xfer direction is from-device, we must copy from the
2821 * pad buffer back into the supplied buffer
2822 */
2823 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2824 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2825
2826 if (qc->flags & ATA_QCFLAG_SG) {
2827 if (qc->n_elem)
2828 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2829 /* restore last sg */
2830 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2831 if (pad_buf) {
2832 struct scatterlist *psg = &qc->pad_sgent;
2833 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2834 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2835 kunmap_atomic(addr, KM_IRQ0);
2836 }
2837 } else {
2838 if (qc->n_elem)
2839 dma_unmap_single(ap->dev,
2840 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2841 dir);
2842 /* restore sg */
2843 sg->length += qc->pad_len;
2844 if (pad_buf)
2845 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2846 pad_buf, qc->pad_len);
2847 }
2848
2849 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2850 qc->__sg = NULL;
2851 }
2852
2853 /**
2854 * ata_fill_sg - Fill PCI IDE PRD table
2855 * @qc: Metadata associated with taskfile to be transferred
2856 *
2857 * Fill PCI IDE PRD (scatter-gather) table with segments
2858 * associated with the current disk command.
2859 *
2860 * LOCKING:
2861 * spin_lock_irqsave(host_set lock)
2862 *
2863 */
2864 static void ata_fill_sg(struct ata_queued_cmd *qc)
2865 {
2866 struct ata_port *ap = qc->ap;
2867 struct scatterlist *sg;
2868 unsigned int idx;
2869
2870 WARN_ON(qc->__sg == NULL);
2871 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2872
2873 idx = 0;
2874 ata_for_each_sg(sg, qc) {
2875 u32 addr, offset;
2876 u32 sg_len, len;
2877
2878 /* determine if physical DMA addr spans 64K boundary.
2879 * Note h/w doesn't support 64-bit, so we unconditionally
2880 * truncate dma_addr_t to u32.
2881 */
2882 addr = (u32) sg_dma_address(sg);
2883 sg_len = sg_dma_len(sg);
2884
2885 while (sg_len) {
2886 offset = addr & 0xffff;
2887 len = sg_len;
2888 if ((offset + sg_len) > 0x10000)
2889 len = 0x10000 - offset;
2890
2891 ap->prd[idx].addr = cpu_to_le32(addr);
2892 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2893 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2894
2895 idx++;
2896 sg_len -= len;
2897 addr += len;
2898 }
2899 }
2900
2901 if (idx)
2902 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2903 }
2904 /**
2905 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2906 * @qc: Metadata associated with taskfile to check
2907 *
2908 * Allow low-level driver to filter ATA PACKET commands, returning
2909 * a status indicating whether or not it is OK to use DMA for the
2910 * supplied PACKET command.
2911 *
2912 * LOCKING:
2913 * spin_lock_irqsave(host_set lock)
2914 *
2915 * RETURNS: 0 when ATAPI DMA can be used
2916 * nonzero otherwise
2917 */
2918 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2919 {
2920 struct ata_port *ap = qc->ap;
2921 int rc = 0; /* Assume ATAPI DMA is OK by default */
2922
2923 if (ap->ops->check_atapi_dma)
2924 rc = ap->ops->check_atapi_dma(qc);
2925
2926 /* We don't support polling DMA.
2927 * Use PIO if the LLDD handles only interrupts in
2928 * the HSM_ST_LAST state and the ATAPI device
2929 * generates CDB interrupts.
2930 */
2931 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
2932 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
2933 rc = 1;
2934
2935 return rc;
2936 }
2937 /**
2938 * ata_qc_prep - Prepare taskfile for submission
2939 * @qc: Metadata associated with taskfile to be prepared
2940 *
2941 * Prepare ATA taskfile for submission.
2942 *
2943 * LOCKING:
2944 * spin_lock_irqsave(host_set lock)
2945 */
2946 void ata_qc_prep(struct ata_queued_cmd *qc)
2947 {
2948 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2949 return;
2950
2951 ata_fill_sg(qc);
2952 }
2953
2954 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2955
2956 /**
2957 * ata_sg_init_one - Associate command with memory buffer
2958 * @qc: Command to be associated
2959 * @buf: Memory buffer
2960 * @buflen: Length of memory buffer, in bytes.
2961 *
2962 * Initialize the data-related elements of queued_cmd @qc
2963 * to point to a single memory buffer, @buf of byte length @buflen.
2964 *
2965 * LOCKING:
2966 * spin_lock_irqsave(host_set lock)
2967 */
2968
2969 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2970 {
2971 struct scatterlist *sg;
2972
2973 qc->flags |= ATA_QCFLAG_SINGLE;
2974
2975 memset(&qc->sgent, 0, sizeof(qc->sgent));
2976 qc->__sg = &qc->sgent;
2977 qc->n_elem = 1;
2978 qc->orig_n_elem = 1;
2979 qc->buf_virt = buf;
2980
2981 sg = qc->__sg;
2982 sg_init_one(sg, buf, buflen);
2983 }
2984
2985 /**
2986 * ata_sg_init - Associate command with scatter-gather table.
2987 * @qc: Command to be associated
2988 * @sg: Scatter-gather table.
2989 * @n_elem: Number of elements in s/g table.
2990 *
2991 * Initialize the data-related elements of queued_cmd @qc
2992 * to point to a scatter-gather table @sg, containing @n_elem
2993 * elements.
2994 *
2995 * LOCKING:
2996 * spin_lock_irqsave(host_set lock)
2997 */
2998
2999 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3000 unsigned int n_elem)
3001 {
3002 qc->flags |= ATA_QCFLAG_SG;
3003 qc->__sg = sg;
3004 qc->n_elem = n_elem;
3005 qc->orig_n_elem = n_elem;
3006 }
3007
3008 /**
3009 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3010 * @qc: Command with memory buffer to be mapped.
3011 *
3012 * DMA-map the memory buffer associated with queued_cmd @qc.
3013 *
3014 * LOCKING:
3015 * spin_lock_irqsave(host_set lock)
3016 *
3017 * RETURNS:
3018 * Zero on success, negative on error.
3019 */
3020
3021 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3022 {
3023 struct ata_port *ap = qc->ap;
3024 int dir = qc->dma_dir;
3025 struct scatterlist *sg = qc->__sg;
3026 dma_addr_t dma_address;
3027 int trim_sg = 0;
3028
3029 /* we must lengthen transfers to end on a 32-bit boundary */
3030 qc->pad_len = sg->length & 3;
3031 if (qc->pad_len) {
3032 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3033 struct scatterlist *psg = &qc->pad_sgent;
3034
3035 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3036
3037 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3038
3039 if (qc->tf.flags & ATA_TFLAG_WRITE)
3040 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3041 qc->pad_len);
3042
3043 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3044 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3045 /* trim sg */
3046 sg->length -= qc->pad_len;
3047 if (sg->length == 0)
3048 trim_sg = 1;
3049
3050 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3051 sg->length, qc->pad_len);
3052 }
3053
3054 if (trim_sg) {
3055 qc->n_elem--;
3056 goto skip_map;
3057 }
3058
3059 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3060 sg->length, dir);
3061 if (dma_mapping_error(dma_address)) {
3062 /* restore sg */
3063 sg->length += qc->pad_len;
3064 return -1;
3065 }
3066
3067 sg_dma_address(sg) = dma_address;
3068 sg_dma_len(sg) = sg->length;
3069
3070 skip_map:
3071 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3072 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3073
3074 return 0;
3075 }
3076
3077 /**
3078 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3079 * @qc: Command with scatter-gather table to be mapped.
3080 *
3081 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3082 *
3083 * LOCKING:
3084 * spin_lock_irqsave(host_set lock)
3085 *
3086 * RETURNS:
3087 * Zero on success, negative on error.
3088 *
3089 */
3090
3091 static int ata_sg_setup(struct ata_queued_cmd *qc)
3092 {
3093 struct ata_port *ap = qc->ap;
3094 struct scatterlist *sg = qc->__sg;
3095 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3096 int n_elem, pre_n_elem, dir, trim_sg = 0;
3097
3098 VPRINTK("ENTER, ata%u\n", ap->id);
3099 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3100
3101 /* we must lengthen transfers to end on a 32-bit boundary */
3102 qc->pad_len = lsg->length & 3;
3103 if (qc->pad_len) {
3104 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3105 struct scatterlist *psg = &qc->pad_sgent;
3106 unsigned int offset;
3107
3108 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3109
3110 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3111
3112 /*
3113 * psg->page/offset are used to copy to-be-written
3114 * data in this function or read data in ata_sg_clean.
3115 */
3116 offset = lsg->offset + lsg->length - qc->pad_len;
3117 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3118 psg->offset = offset_in_page(offset);
3119
3120 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3121 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3122 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3123 kunmap_atomic(addr, KM_IRQ0);
3124 }
3125
3126 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3127 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3128 /* trim last sg */
3129 lsg->length -= qc->pad_len;
3130 if (lsg->length == 0)
3131 trim_sg = 1;
3132
3133 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3134 qc->n_elem - 1, lsg->length, qc->pad_len);
3135 }
3136
3137 pre_n_elem = qc->n_elem;
3138 if (trim_sg && pre_n_elem)
3139 pre_n_elem--;
3140
3141 if (!pre_n_elem) {
3142 n_elem = 0;
3143 goto skip_map;
3144 }
3145
3146 dir = qc->dma_dir;
3147 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3148 if (n_elem < 1) {
3149 /* restore last sg */
3150 lsg->length += qc->pad_len;
3151 return -1;
3152 }
3153
3154 DPRINTK("%d sg elements mapped\n", n_elem);
3155
3156 skip_map:
3157 qc->n_elem = n_elem;
3158
3159 return 0;
3160 }
3161
3162 /**
3163 * ata_poll_qc_complete - turn irq back on and finish qc
3164 * @qc: Command to complete
3165 * @err_mask: ATA status register content
3166 *
3167 * LOCKING:
3168 * None. (grabs host lock)
3169 */
3170
3171 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3172 {
3173 struct ata_port *ap = qc->ap;
3174 unsigned long flags;
3175
3176 spin_lock_irqsave(&ap->host_set->lock, flags);
3177 ata_irq_on(ap);
3178 ata_qc_complete(qc);
3179 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3180 }
3181
3182 /**
3183 * swap_buf_le16 - swap halves of 16-bit words in place
3184 * @buf: Buffer to swap
3185 * @buf_words: Number of 16-bit words in buffer.
3186 *
3187 * Swap halves of 16-bit words if needed to convert from
3188 * little-endian byte order to native cpu byte order, or
3189 * vice-versa.
3190 *
3191 * LOCKING:
3192 * Inherited from caller.
3193 */
3194 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3195 {
3196 #ifdef __BIG_ENDIAN
3197 unsigned int i;
3198
3199 for (i = 0; i < buf_words; i++)
3200 buf[i] = le16_to_cpu(buf[i]);
3201 #endif /* __BIG_ENDIAN */
3202 }
3203
3204 /**
3205 * ata_mmio_data_xfer - Transfer data by MMIO
3206 * @ap: port to read/write
3207 * @buf: data buffer
3208 * @buflen: buffer length
3209 * @write_data: read/write
3210 *
3211 * Transfer data from/to the device data register by MMIO.
3212 *
3213 * LOCKING:
3214 * Inherited from caller.
3215 */
3216
3217 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3218 unsigned int buflen, int write_data)
3219 {
3220 unsigned int i;
3221 unsigned int words = buflen >> 1;
3222 u16 *buf16 = (u16 *) buf;
3223 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3224
3225 /* Transfer multiple of 2 bytes */
3226 if (write_data) {
3227 for (i = 0; i < words; i++)
3228 writew(le16_to_cpu(buf16[i]), mmio);
3229 } else {
3230 for (i = 0; i < words; i++)
3231 buf16[i] = cpu_to_le16(readw(mmio));
3232 }
3233
3234 /* Transfer trailing 1 byte, if any. */
3235 if (unlikely(buflen & 0x01)) {
3236 u16 align_buf[1] = { 0 };
3237 unsigned char *trailing_buf = buf + buflen - 1;
3238
3239 if (write_data) {
3240 memcpy(align_buf, trailing_buf, 1);
3241 writew(le16_to_cpu(align_buf[0]), mmio);
3242 } else {
3243 align_buf[0] = cpu_to_le16(readw(mmio));
3244 memcpy(trailing_buf, align_buf, 1);
3245 }
3246 }
3247 }
3248
3249 /**
3250 * ata_pio_data_xfer - Transfer data by PIO
3251 * @ap: port to read/write
3252 * @buf: data buffer
3253 * @buflen: buffer length
3254 * @write_data: read/write
3255 *
3256 * Transfer data from/to the device data register by PIO.
3257 *
3258 * LOCKING:
3259 * Inherited from caller.
3260 */
3261
3262 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3263 unsigned int buflen, int write_data)
3264 {
3265 unsigned int words = buflen >> 1;
3266
3267 /* Transfer multiple of 2 bytes */
3268 if (write_data)
3269 outsw(ap->ioaddr.data_addr, buf, words);
3270 else
3271 insw(ap->ioaddr.data_addr, buf, words);
3272
3273 /* Transfer trailing 1 byte, if any. */
3274 if (unlikely(buflen & 0x01)) {
3275 u16 align_buf[1] = { 0 };
3276 unsigned char *trailing_buf = buf + buflen - 1;
3277
3278 if (write_data) {
3279 memcpy(align_buf, trailing_buf, 1);
3280 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3281 } else {
3282 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3283 memcpy(trailing_buf, align_buf, 1);
3284 }
3285 }
3286 }
3287
3288 /**
3289 * ata_data_xfer - Transfer data from/to the data register.
3290 * @ap: port to read/write
3291 * @buf: data buffer
3292 * @buflen: buffer length
3293 * @do_write: read/write
3294 *
3295 * Transfer data from/to the device data register.
3296 *
3297 * LOCKING:
3298 * Inherited from caller.
3299 */
3300
3301 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3302 unsigned int buflen, int do_write)
3303 {
3304 /* Make the crap hardware pay the costs not the good stuff */
3305 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3306 unsigned long flags;
3307 local_irq_save(flags);
3308 if (ap->flags & ATA_FLAG_MMIO)
3309 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3310 else
3311 ata_pio_data_xfer(ap, buf, buflen, do_write);
3312 local_irq_restore(flags);
3313 } else {
3314 if (ap->flags & ATA_FLAG_MMIO)
3315 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3316 else
3317 ata_pio_data_xfer(ap, buf, buflen, do_write);
3318 }
3319 }
3320
3321 /**
3322 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3323 * @qc: Command on going
3324 *
3325 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3326 *
3327 * LOCKING:
3328 * Inherited from caller.
3329 */
3330
3331 static void ata_pio_sector(struct ata_queued_cmd *qc)
3332 {
3333 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3334 struct scatterlist *sg = qc->__sg;
3335 struct ata_port *ap = qc->ap;
3336 struct page *page;
3337 unsigned int offset;
3338 unsigned char *buf;
3339
3340 if (qc->cursect == (qc->nsect - 1))
3341 ap->hsm_task_state = HSM_ST_LAST;
3342
3343 page = sg[qc->cursg].page;
3344 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3345
3346 /* get the current page and offset */
3347 page = nth_page(page, (offset >> PAGE_SHIFT));
3348 offset %= PAGE_SIZE;
3349
3350 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3351
3352 if (PageHighMem(page)) {
3353 unsigned long flags;
3354
3355 local_irq_save(flags);
3356 buf = kmap_atomic(page, KM_IRQ0);
3357
3358 /* do the actual data transfer */
3359 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3360
3361 kunmap_atomic(buf, KM_IRQ0);
3362 local_irq_restore(flags);
3363 } else {
3364 buf = page_address(page);
3365 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3366 }
3367
3368 qc->cursect++;
3369 qc->cursg_ofs++;
3370
3371 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3372 qc->cursg++;
3373 qc->cursg_ofs = 0;
3374 }
3375 }
3376
3377 /**
3378 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3379 * @qc: Command on going
3380 *
3381 * Transfer one or many ATA_SECT_SIZE of data from/to the
3382 * ATA device for the DRQ request.
3383 *
3384 * LOCKING:
3385 * Inherited from caller.
3386 */
3387
3388 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3389 {
3390 if (is_multi_taskfile(&qc->tf)) {
3391 /* READ/WRITE MULTIPLE */
3392 unsigned int nsect;
3393
3394 WARN_ON(qc->dev->multi_count == 0);
3395
3396 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3397 while (nsect--)
3398 ata_pio_sector(qc);
3399 } else
3400 ata_pio_sector(qc);
3401 }
3402
3403 /**
3404 * atapi_send_cdb - Write CDB bytes to hardware
3405 * @ap: Port to which ATAPI device is attached.
3406 * @qc: Taskfile currently active
3407 *
3408 * When device has indicated its readiness to accept
3409 * a CDB, this function is called. Send the CDB.
3410 *
3411 * LOCKING:
3412 * caller.
3413 */
3414
3415 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3416 {
3417 /* send SCSI cdb */
3418 DPRINTK("send cdb\n");
3419 WARN_ON(qc->dev->cdb_len < 12);
3420
3421 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3422 ata_altstatus(ap); /* flush */
3423
3424 switch (qc->tf.protocol) {
3425 case ATA_PROT_ATAPI:
3426 ap->hsm_task_state = HSM_ST;
3427 break;
3428 case ATA_PROT_ATAPI_NODATA:
3429 ap->hsm_task_state = HSM_ST_LAST;
3430 break;
3431 case ATA_PROT_ATAPI_DMA:
3432 ap->hsm_task_state = HSM_ST_LAST;
3433 /* initiate bmdma */
3434 ap->ops->bmdma_start(qc);
3435 break;
3436 }
3437 }
3438
3439 /**
3440 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3441 * @qc: Command on going
3442 * @bytes: number of bytes
3443 *
3444 * Transfer Transfer data from/to the ATAPI device.
3445 *
3446 * LOCKING:
3447 * Inherited from caller.
3448 *
3449 */
3450
3451 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3452 {
3453 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3454 struct scatterlist *sg = qc->__sg;
3455 struct ata_port *ap = qc->ap;
3456 struct page *page;
3457 unsigned char *buf;
3458 unsigned int offset, count;
3459
3460 if (qc->curbytes + bytes >= qc->nbytes)
3461 ap->hsm_task_state = HSM_ST_LAST;
3462
3463 next_sg:
3464 if (unlikely(qc->cursg >= qc->n_elem)) {
3465 /*
3466 * The end of qc->sg is reached and the device expects
3467 * more data to transfer. In order not to overrun qc->sg
3468 * and fulfill length specified in the byte count register,
3469 * - for read case, discard trailing data from the device
3470 * - for write case, padding zero data to the device
3471 */
3472 u16 pad_buf[1] = { 0 };
3473 unsigned int words = bytes >> 1;
3474 unsigned int i;
3475
3476 if (words) /* warning if bytes > 1 */
3477 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3478 ap->id, bytes);
3479
3480 for (i = 0; i < words; i++)
3481 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3482
3483 ap->hsm_task_state = HSM_ST_LAST;
3484 return;
3485 }
3486
3487 sg = &qc->__sg[qc->cursg];
3488
3489 page = sg->page;
3490 offset = sg->offset + qc->cursg_ofs;
3491
3492 /* get the current page and offset */
3493 page = nth_page(page, (offset >> PAGE_SHIFT));
3494 offset %= PAGE_SIZE;
3495
3496 /* don't overrun current sg */
3497 count = min(sg->length - qc->cursg_ofs, bytes);
3498
3499 /* don't cross page boundaries */
3500 count = min(count, (unsigned int)PAGE_SIZE - offset);
3501
3502 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3503
3504 if (PageHighMem(page)) {
3505 unsigned long flags;
3506
3507 local_irq_save(flags);
3508 buf = kmap_atomic(page, KM_IRQ0);
3509
3510 /* do the actual data transfer */
3511 ata_data_xfer(ap, buf + offset, count, do_write);
3512
3513 kunmap_atomic(buf, KM_IRQ0);
3514 local_irq_restore(flags);
3515 } else {
3516 buf = page_address(page);
3517 ata_data_xfer(ap, buf + offset, count, do_write);
3518 }
3519
3520 bytes -= count;
3521 qc->curbytes += count;
3522 qc->cursg_ofs += count;
3523
3524 if (qc->cursg_ofs == sg->length) {
3525 qc->cursg++;
3526 qc->cursg_ofs = 0;
3527 }
3528
3529 if (bytes)
3530 goto next_sg;
3531 }
3532
3533 /**
3534 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3535 * @qc: Command on going
3536 *
3537 * Transfer Transfer data from/to the ATAPI device.
3538 *
3539 * LOCKING:
3540 * Inherited from caller.
3541 */
3542
3543 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3544 {
3545 struct ata_port *ap = qc->ap;
3546 struct ata_device *dev = qc->dev;
3547 unsigned int ireason, bc_lo, bc_hi, bytes;
3548 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3549
3550 ap->ops->tf_read(ap, &qc->tf);
3551 ireason = qc->tf.nsect;
3552 bc_lo = qc->tf.lbam;
3553 bc_hi = qc->tf.lbah;
3554 bytes = (bc_hi << 8) | bc_lo;
3555
3556 /* shall be cleared to zero, indicating xfer of data */
3557 if (ireason & (1 << 0))
3558 goto err_out;
3559
3560 /* make sure transfer direction matches expected */
3561 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3562 if (do_write != i_write)
3563 goto err_out;
3564
3565 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3566
3567 __atapi_pio_bytes(qc, bytes);
3568
3569 return;
3570
3571 err_out:
3572 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3573 ap->id, dev->devno);
3574 qc->err_mask |= AC_ERR_HSM;
3575 ap->hsm_task_state = HSM_ST_ERR;
3576 }
3577
3578 /**
3579 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3580 * @ap: the target ata_port
3581 * @qc: qc on going
3582 *
3583 * RETURNS:
3584 * 1 if ok in workqueue, 0 otherwise.
3585 */
3586
3587 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3588 {
3589 if (qc->tf.flags & ATA_TFLAG_POLLING)
3590 return 1;
3591
3592 if (ap->hsm_task_state == HSM_ST_FIRST) {
3593 if (qc->tf.protocol == ATA_PROT_PIO &&
3594 (qc->tf.flags & ATA_TFLAG_WRITE))
3595 return 1;
3596
3597 if (is_atapi_taskfile(&qc->tf) &&
3598 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3599 return 1;
3600 }
3601
3602 return 0;
3603 }
3604
3605 /**
3606 * ata_hsm_move - move the HSM to the next state.
3607 * @ap: the target ata_port
3608 * @qc: qc on going
3609 * @status: current device status
3610 * @in_wq: 1 if called from workqueue, 0 otherwise
3611 *
3612 * RETURNS:
3613 * 1 when poll next status needed, 0 otherwise.
3614 */
3615
3616 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3617 u8 status, int in_wq)
3618 {
3619 unsigned long flags = 0;
3620 int poll_next;
3621
3622 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3623
3624 /* Make sure ata_qc_issue_prot() does not throw things
3625 * like DMA polling into the workqueue. Notice that
3626 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3627 */
3628 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3629
3630 fsm_start:
3631 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3632 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3633
3634 switch (ap->hsm_task_state) {
3635 case HSM_ST_FIRST:
3636 /* Send first data block or PACKET CDB */
3637
3638 /* If polling, we will stay in the work queue after
3639 * sending the data. Otherwise, interrupt handler
3640 * takes over after sending the data.
3641 */
3642 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3643
3644 /* check device status */
3645 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
3646 /* Wrong status. Let EH handle this */
3647 qc->err_mask |= AC_ERR_HSM;
3648 ap->hsm_task_state = HSM_ST_ERR;
3649 goto fsm_start;
3650 }
3651
3652 /* Device should not ask for data transfer (DRQ=1)
3653 * when it finds something wrong.
3654 * Anyway, we respect DRQ here and let HSM go on
3655 * without changing hsm_task_state to HSM_ST_ERR.
3656 */
3657 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3658 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3659 ap->id, status);
3660 qc->err_mask |= AC_ERR_DEV;
3661 }
3662
3663 /* Send the CDB (atapi) or the first data block (ata pio out).
3664 * During the state transition, interrupt handler shouldn't
3665 * be invoked before the data transfer is complete and
3666 * hsm_task_state is changed. Hence, the following locking.
3667 */
3668 if (in_wq)
3669 spin_lock_irqsave(&ap->host_set->lock, flags);
3670
3671 if (qc->tf.protocol == ATA_PROT_PIO) {
3672 /* PIO data out protocol.
3673 * send first data block.
3674 */
3675
3676 /* ata_pio_sectors() might change the state
3677 * to HSM_ST_LAST. so, the state is changed here
3678 * before ata_pio_sectors().
3679 */
3680 ap->hsm_task_state = HSM_ST;
3681 ata_pio_sectors(qc);
3682 ata_altstatus(ap); /* flush */
3683 } else
3684 /* send CDB */
3685 atapi_send_cdb(ap, qc);
3686
3687 if (in_wq)
3688 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3689
3690 /* if polling, ata_pio_task() handles the rest.
3691 * otherwise, interrupt handler takes over from here.
3692 */
3693 break;
3694
3695 case HSM_ST:
3696 /* complete command or read/write the data register */
3697 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3698 /* ATAPI PIO protocol */
3699 if ((status & ATA_DRQ) == 0) {
3700 /* no more data to transfer */
3701 ap->hsm_task_state = HSM_ST_LAST;
3702 goto fsm_start;
3703 }
3704
3705 /* Device should not ask for data transfer (DRQ=1)
3706 * when it finds something wrong.
3707 * Anyway, we respect DRQ here and let HSM go on
3708 * without changing hsm_task_state to HSM_ST_ERR.
3709 */
3710 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3711 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3712 ap->id, status);
3713 qc->err_mask |= AC_ERR_DEV;
3714 }
3715
3716 atapi_pio_bytes(qc);
3717
3718 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3719 /* bad ireason reported by device */
3720 goto fsm_start;
3721
3722 } else {
3723 /* ATA PIO protocol */
3724 if (unlikely((status & ATA_DRQ) == 0)) {
3725 /* handle BSY=0, DRQ=0 as error */
3726 qc->err_mask |= AC_ERR_HSM;
3727 ap->hsm_task_state = HSM_ST_ERR;
3728 goto fsm_start;
3729 }
3730
3731 /* Some devices may ask for data transfer (DRQ=1)
3732 * alone with ERR=1 for PIO reads.
3733 * We respect DRQ here and let HSM go on without
3734 * changing hsm_task_state to HSM_ST_ERR.
3735 */
3736 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3737 /* For writes, ERR=1 DRQ=1 doesn't make
3738 * sense since the data block has been
3739 * transferred to the device.
3740 */
3741 WARN_ON(qc->tf.flags & ATA_TFLAG_WRITE);
3742
3743 /* data might be corrputed */
3744 qc->err_mask |= AC_ERR_DEV;
3745 }
3746
3747 ata_pio_sectors(qc);
3748
3749 if (ap->hsm_task_state == HSM_ST_LAST &&
3750 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
3751 /* all data read */
3752 ata_altstatus(ap);
3753 status = ata_wait_idle(ap);
3754 goto fsm_start;
3755 }
3756 }
3757
3758 ata_altstatus(ap); /* flush */
3759 poll_next = 1;
3760 break;
3761
3762 case HSM_ST_LAST:
3763 if (unlikely(!ata_ok(status))) {
3764 qc->err_mask |= __ac_err_mask(status);
3765 ap->hsm_task_state = HSM_ST_ERR;
3766 goto fsm_start;
3767 }
3768
3769 /* no more data to transfer */
3770 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
3771 ap->id, status);
3772
3773 WARN_ON(qc->err_mask);
3774
3775 ap->hsm_task_state = HSM_ST_IDLE;
3776
3777 /* complete taskfile transaction */
3778 if (in_wq)
3779 ata_poll_qc_complete(qc);
3780 else
3781 ata_qc_complete(qc);
3782
3783 poll_next = 0;
3784 break;
3785
3786 case HSM_ST_ERR:
3787 if (qc->tf.command != ATA_CMD_PACKET)
3788 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x\n",
3789 ap->id, status);
3790
3791 /* make sure qc->err_mask is available to
3792 * know what's wrong and recover
3793 */
3794 WARN_ON(qc->err_mask == 0);
3795
3796 ap->hsm_task_state = HSM_ST_IDLE;
3797
3798 /* complete taskfile transaction */
3799 if (in_wq)
3800 ata_poll_qc_complete(qc);
3801 else
3802 ata_qc_complete(qc);
3803
3804 poll_next = 0;
3805 break;
3806 default:
3807 poll_next = 0;
3808 BUG();
3809 }
3810
3811 return poll_next;
3812 }
3813
3814 static void ata_pio_task(void *_data)
3815 {
3816 struct ata_port *ap = _data;
3817 struct ata_queued_cmd *qc;
3818 u8 status;
3819 int poll_next;
3820
3821 fsm_start:
3822 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3823
3824 qc = ata_qc_from_tag(ap, ap->active_tag);
3825 WARN_ON(qc == NULL);
3826
3827 /*
3828 * This is purely heuristic. This is a fast path.
3829 * Sometimes when we enter, BSY will be cleared in
3830 * a chk-status or two. If not, the drive is probably seeking
3831 * or something. Snooze for a couple msecs, then
3832 * chk-status again. If still busy, queue delayed work.
3833 */
3834 status = ata_busy_wait(ap, ATA_BUSY, 5);
3835 if (status & ATA_BUSY) {
3836 msleep(2);
3837 status = ata_busy_wait(ap, ATA_BUSY, 10);
3838 if (status & ATA_BUSY) {
3839 ata_port_queue_task(ap, ata_pio_task, ap, ATA_SHORT_PAUSE);
3840 return;
3841 }
3842 }
3843
3844 /* move the HSM */
3845 poll_next = ata_hsm_move(ap, qc, status, 1);
3846
3847 /* another command or interrupt handler
3848 * may be running at this point.
3849 */
3850 if (poll_next)
3851 goto fsm_start;
3852 }
3853
3854 /**
3855 * ata_qc_timeout - Handle timeout of queued command
3856 * @qc: Command that timed out
3857 *
3858 * Some part of the kernel (currently, only the SCSI layer)
3859 * has noticed that the active command on port @ap has not
3860 * completed after a specified length of time. Handle this
3861 * condition by disabling DMA (if necessary) and completing
3862 * transactions, with error if necessary.
3863 *
3864 * This also handles the case of the "lost interrupt", where
3865 * for some reason (possibly hardware bug, possibly driver bug)
3866 * an interrupt was not delivered to the driver, even though the
3867 * transaction completed successfully.
3868 *
3869 * LOCKING:
3870 * Inherited from SCSI layer (none, can sleep)
3871 */
3872
3873 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3874 {
3875 struct ata_port *ap = qc->ap;
3876 struct ata_host_set *host_set = ap->host_set;
3877 u8 host_stat = 0, drv_stat;
3878 unsigned long flags;
3879
3880 DPRINTK("ENTER\n");
3881
3882 ap->hsm_task_state = HSM_ST_IDLE;
3883
3884 spin_lock_irqsave(&host_set->lock, flags);
3885
3886 switch (qc->tf.protocol) {
3887
3888 case ATA_PROT_DMA:
3889 case ATA_PROT_ATAPI_DMA:
3890 host_stat = ap->ops->bmdma_status(ap);
3891
3892 /* before we do anything else, clear DMA-Start bit */
3893 ap->ops->bmdma_stop(qc);
3894
3895 /* fall through */
3896
3897 default:
3898 ata_altstatus(ap);
3899 drv_stat = ata_chk_status(ap);
3900
3901 /* ack bmdma irq events */
3902 ap->ops->irq_clear(ap);
3903
3904 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3905 ap->id, qc->tf.command, drv_stat, host_stat);
3906
3907 ap->hsm_task_state = HSM_ST_IDLE;
3908
3909 /* complete taskfile transaction */
3910 qc->err_mask |= AC_ERR_TIMEOUT;
3911 break;
3912 }
3913
3914 spin_unlock_irqrestore(&host_set->lock, flags);
3915
3916 ata_eh_qc_complete(qc);
3917
3918 DPRINTK("EXIT\n");
3919 }
3920
3921 /**
3922 * ata_eng_timeout - Handle timeout of queued command
3923 * @ap: Port on which timed-out command is active
3924 *
3925 * Some part of the kernel (currently, only the SCSI layer)
3926 * has noticed that the active command on port @ap has not
3927 * completed after a specified length of time. Handle this
3928 * condition by disabling DMA (if necessary) and completing
3929 * transactions, with error if necessary.
3930 *
3931 * This also handles the case of the "lost interrupt", where
3932 * for some reason (possibly hardware bug, possibly driver bug)
3933 * an interrupt was not delivered to the driver, even though the
3934 * transaction completed successfully.
3935 *
3936 * LOCKING:
3937 * Inherited from SCSI layer (none, can sleep)
3938 */
3939
3940 void ata_eng_timeout(struct ata_port *ap)
3941 {
3942 DPRINTK("ENTER\n");
3943
3944 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3945
3946 DPRINTK("EXIT\n");
3947 }
3948
3949 /**
3950 * ata_qc_new - Request an available ATA command, for queueing
3951 * @ap: Port associated with device @dev
3952 * @dev: Device from whom we request an available command structure
3953 *
3954 * LOCKING:
3955 * None.
3956 */
3957
3958 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3959 {
3960 struct ata_queued_cmd *qc = NULL;
3961 unsigned int i;
3962
3963 for (i = 0; i < ATA_MAX_QUEUE; i++)
3964 if (!test_and_set_bit(i, &ap->qactive)) {
3965 qc = ata_qc_from_tag(ap, i);
3966 break;
3967 }
3968
3969 if (qc)
3970 qc->tag = i;
3971
3972 return qc;
3973 }
3974
3975 /**
3976 * ata_qc_new_init - Request an available ATA command, and initialize it
3977 * @ap: Port associated with device @dev
3978 * @dev: Device from whom we request an available command structure
3979 *
3980 * LOCKING:
3981 * None.
3982 */
3983
3984 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3985 struct ata_device *dev)
3986 {
3987 struct ata_queued_cmd *qc;
3988
3989 qc = ata_qc_new(ap);
3990 if (qc) {
3991 qc->scsicmd = NULL;
3992 qc->ap = ap;
3993 qc->dev = dev;
3994
3995 ata_qc_reinit(qc);
3996 }
3997
3998 return qc;
3999 }
4000
4001 /**
4002 * ata_qc_free - free unused ata_queued_cmd
4003 * @qc: Command to complete
4004 *
4005 * Designed to free unused ata_queued_cmd object
4006 * in case something prevents using it.
4007 *
4008 * LOCKING:
4009 * spin_lock_irqsave(host_set lock)
4010 */
4011 void ata_qc_free(struct ata_queued_cmd *qc)
4012 {
4013 struct ata_port *ap = qc->ap;
4014 unsigned int tag;
4015
4016 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4017
4018 qc->flags = 0;
4019 tag = qc->tag;
4020 if (likely(ata_tag_valid(tag))) {
4021 if (tag == ap->active_tag)
4022 ap->active_tag = ATA_TAG_POISON;
4023 qc->tag = ATA_TAG_POISON;
4024 clear_bit(tag, &ap->qactive);
4025 }
4026 }
4027
4028 void __ata_qc_complete(struct ata_queued_cmd *qc)
4029 {
4030 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4031 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4032
4033 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4034 ata_sg_clean(qc);
4035
4036 /* atapi: mark qc as inactive to prevent the interrupt handler
4037 * from completing the command twice later, before the error handler
4038 * is called. (when rc != 0 and atapi request sense is needed)
4039 */
4040 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4041
4042 /* call completion callback */
4043 qc->complete_fn(qc);
4044 }
4045
4046 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4047 {
4048 struct ata_port *ap = qc->ap;
4049
4050 switch (qc->tf.protocol) {
4051 case ATA_PROT_DMA:
4052 case ATA_PROT_ATAPI_DMA:
4053 return 1;
4054
4055 case ATA_PROT_ATAPI:
4056 case ATA_PROT_PIO:
4057 if (ap->flags & ATA_FLAG_PIO_DMA)
4058 return 1;
4059
4060 /* fall through */
4061
4062 default:
4063 return 0;
4064 }
4065
4066 /* never reached */
4067 }
4068
4069 /**
4070 * ata_qc_issue - issue taskfile to device
4071 * @qc: command to issue to device
4072 *
4073 * Prepare an ATA command to submission to device.
4074 * This includes mapping the data into a DMA-able
4075 * area, filling in the S/G table, and finally
4076 * writing the taskfile to hardware, starting the command.
4077 *
4078 * LOCKING:
4079 * spin_lock_irqsave(host_set lock)
4080 *
4081 * RETURNS:
4082 * Zero on success, AC_ERR_* mask on failure
4083 */
4084
4085 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4086 {
4087 struct ata_port *ap = qc->ap;
4088
4089 if (ata_should_dma_map(qc)) {
4090 if (qc->flags & ATA_QCFLAG_SG) {
4091 if (ata_sg_setup(qc))
4092 goto sg_err;
4093 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4094 if (ata_sg_setup_one(qc))
4095 goto sg_err;
4096 }
4097 } else {
4098 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4099 }
4100
4101 ap->ops->qc_prep(qc);
4102
4103 qc->ap->active_tag = qc->tag;
4104 qc->flags |= ATA_QCFLAG_ACTIVE;
4105
4106 return ap->ops->qc_issue(qc);
4107
4108 sg_err:
4109 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4110 return AC_ERR_SYSTEM;
4111 }
4112
4113
4114 /**
4115 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4116 * @qc: command to issue to device
4117 *
4118 * Using various libata functions and hooks, this function
4119 * starts an ATA command. ATA commands are grouped into
4120 * classes called "protocols", and issuing each type of protocol
4121 * is slightly different.
4122 *
4123 * May be used as the qc_issue() entry in ata_port_operations.
4124 *
4125 * LOCKING:
4126 * spin_lock_irqsave(host_set lock)
4127 *
4128 * RETURNS:
4129 * Zero on success, AC_ERR_* mask on failure
4130 */
4131
4132 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4133 {
4134 struct ata_port *ap = qc->ap;
4135
4136 /* Use polling pio if the LLD doesn't handle
4137 * interrupt driven pio and atapi CDB interrupt.
4138 */
4139 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4140 switch (qc->tf.protocol) {
4141 case ATA_PROT_PIO:
4142 case ATA_PROT_ATAPI:
4143 case ATA_PROT_ATAPI_NODATA:
4144 qc->tf.flags |= ATA_TFLAG_POLLING;
4145 break;
4146 case ATA_PROT_ATAPI_DMA:
4147 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4148 /* see ata_check_atapi_dma() */
4149 BUG();
4150 break;
4151 default:
4152 break;
4153 }
4154 }
4155
4156 /* select the device */
4157 ata_dev_select(ap, qc->dev->devno, 1, 0);
4158
4159 /* start the command */
4160 switch (qc->tf.protocol) {
4161 case ATA_PROT_NODATA:
4162 if (qc->tf.flags & ATA_TFLAG_POLLING)
4163 ata_qc_set_polling(qc);
4164
4165 ata_tf_to_host(ap, &qc->tf);
4166 ap->hsm_task_state = HSM_ST_LAST;
4167
4168 if (qc->tf.flags & ATA_TFLAG_POLLING)
4169 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4170
4171 break;
4172
4173 case ATA_PROT_DMA:
4174 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4175
4176 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4177 ap->ops->bmdma_setup(qc); /* set up bmdma */
4178 ap->ops->bmdma_start(qc); /* initiate bmdma */
4179 ap->hsm_task_state = HSM_ST_LAST;
4180 break;
4181
4182 case ATA_PROT_PIO:
4183 if (qc->tf.flags & ATA_TFLAG_POLLING)
4184 ata_qc_set_polling(qc);
4185
4186 ata_tf_to_host(ap, &qc->tf);
4187
4188 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4189 /* PIO data out protocol */
4190 ap->hsm_task_state = HSM_ST_FIRST;
4191 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4192
4193 /* always send first data block using
4194 * the ata_pio_task() codepath.
4195 */
4196 } else {
4197 /* PIO data in protocol */
4198 ap->hsm_task_state = HSM_ST;
4199
4200 if (qc->tf.flags & ATA_TFLAG_POLLING)
4201 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4202
4203 /* if polling, ata_pio_task() handles the rest.
4204 * otherwise, interrupt handler takes over from here.
4205 */
4206 }
4207
4208 break;
4209
4210 case ATA_PROT_ATAPI:
4211 case ATA_PROT_ATAPI_NODATA:
4212 if (qc->tf.flags & ATA_TFLAG_POLLING)
4213 ata_qc_set_polling(qc);
4214
4215 ata_tf_to_host(ap, &qc->tf);
4216
4217 ap->hsm_task_state = HSM_ST_FIRST;
4218
4219 /* send cdb by polling if no cdb interrupt */
4220 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4221 (qc->tf.flags & ATA_TFLAG_POLLING))
4222 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4223 break;
4224
4225 case ATA_PROT_ATAPI_DMA:
4226 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4227
4228 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4229 ap->ops->bmdma_setup(qc); /* set up bmdma */
4230 ap->hsm_task_state = HSM_ST_FIRST;
4231
4232 /* send cdb by polling if no cdb interrupt */
4233 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4234 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4235 break;
4236
4237 default:
4238 WARN_ON(1);
4239 return AC_ERR_SYSTEM;
4240 }
4241
4242 return 0;
4243 }
4244
4245 /**
4246 * ata_host_intr - Handle host interrupt for given (port, task)
4247 * @ap: Port on which interrupt arrived (possibly...)
4248 * @qc: Taskfile currently active in engine
4249 *
4250 * Handle host interrupt for given queued command. Currently,
4251 * only DMA interrupts are handled. All other commands are
4252 * handled via polling with interrupts disabled (nIEN bit).
4253 *
4254 * LOCKING:
4255 * spin_lock_irqsave(host_set lock)
4256 *
4257 * RETURNS:
4258 * One if interrupt was handled, zero if not (shared irq).
4259 */
4260
4261 inline unsigned int ata_host_intr (struct ata_port *ap,
4262 struct ata_queued_cmd *qc)
4263 {
4264 u8 status, host_stat = 0;
4265
4266 VPRINTK("ata%u: protocol %d task_state %d\n",
4267 ap->id, qc->tf.protocol, ap->hsm_task_state);
4268
4269 /* Check whether we are expecting interrupt in this state */
4270 switch (ap->hsm_task_state) {
4271 case HSM_ST_FIRST:
4272 /* Some pre-ATAPI-4 devices assert INTRQ
4273 * at this state when ready to receive CDB.
4274 */
4275
4276 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4277 * The flag was turned on only for atapi devices.
4278 * No need to check is_atapi_taskfile(&qc->tf) again.
4279 */
4280 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4281 goto idle_irq;
4282 break;
4283 case HSM_ST_LAST:
4284 if (qc->tf.protocol == ATA_PROT_DMA ||
4285 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4286 /* check status of DMA engine */
4287 host_stat = ap->ops->bmdma_status(ap);
4288 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4289
4290 /* if it's not our irq... */
4291 if (!(host_stat & ATA_DMA_INTR))
4292 goto idle_irq;
4293
4294 /* before we do anything else, clear DMA-Start bit */
4295 ap->ops->bmdma_stop(qc);
4296
4297 if (unlikely(host_stat & ATA_DMA_ERR)) {
4298 /* error when transfering data to/from memory */
4299 qc->err_mask |= AC_ERR_HOST_BUS;
4300 ap->hsm_task_state = HSM_ST_ERR;
4301 }
4302 }
4303 break;
4304 case HSM_ST:
4305 break;
4306 default:
4307 goto idle_irq;
4308 }
4309
4310 /* check altstatus */
4311 status = ata_altstatus(ap);
4312 if (status & ATA_BUSY)
4313 goto idle_irq;
4314
4315 /* check main status, clearing INTRQ */
4316 status = ata_chk_status(ap);
4317 if (unlikely(status & ATA_BUSY))
4318 goto idle_irq;
4319
4320 /* ack bmdma irq events */
4321 ap->ops->irq_clear(ap);
4322
4323 ata_hsm_move(ap, qc, status, 0);
4324 return 1; /* irq handled */
4325
4326 idle_irq:
4327 ap->stats.idle_irq++;
4328
4329 #ifdef ATA_IRQ_TRAP
4330 if ((ap->stats.idle_irq % 1000) == 0) {
4331 ata_irq_ack(ap, 0); /* debug trap */
4332 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4333 return 1;
4334 }
4335 #endif
4336 return 0; /* irq not handled */
4337 }
4338
4339 /**
4340 * ata_interrupt - Default ATA host interrupt handler
4341 * @irq: irq line (unused)
4342 * @dev_instance: pointer to our ata_host_set information structure
4343 * @regs: unused
4344 *
4345 * Default interrupt handler for PCI IDE devices. Calls
4346 * ata_host_intr() for each port that is not disabled.
4347 *
4348 * LOCKING:
4349 * Obtains host_set lock during operation.
4350 *
4351 * RETURNS:
4352 * IRQ_NONE or IRQ_HANDLED.
4353 */
4354
4355 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4356 {
4357 struct ata_host_set *host_set = dev_instance;
4358 unsigned int i;
4359 unsigned int handled = 0;
4360 unsigned long flags;
4361
4362 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4363 spin_lock_irqsave(&host_set->lock, flags);
4364
4365 for (i = 0; i < host_set->n_ports; i++) {
4366 struct ata_port *ap;
4367
4368 ap = host_set->ports[i];
4369 if (ap &&
4370 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
4371 struct ata_queued_cmd *qc;
4372
4373 qc = ata_qc_from_tag(ap, ap->active_tag);
4374 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4375 (qc->flags & ATA_QCFLAG_ACTIVE))
4376 handled |= ata_host_intr(ap, qc);
4377 }
4378 }
4379
4380 spin_unlock_irqrestore(&host_set->lock, flags);
4381
4382 return IRQ_RETVAL(handled);
4383 }
4384
4385
4386 /*
4387 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4388 * without filling any other registers
4389 */
4390 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4391 u8 cmd)
4392 {
4393 struct ata_taskfile tf;
4394 int err;
4395
4396 ata_tf_init(ap, &tf, dev->devno);
4397
4398 tf.command = cmd;
4399 tf.flags |= ATA_TFLAG_DEVICE;
4400 tf.protocol = ATA_PROT_NODATA;
4401
4402 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4403 if (err)
4404 printk(KERN_ERR "%s: ata command failed: %d\n",
4405 __FUNCTION__, err);
4406
4407 return err;
4408 }
4409
4410 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4411 {
4412 u8 cmd;
4413
4414 if (!ata_try_flush_cache(dev))
4415 return 0;
4416
4417 if (ata_id_has_flush_ext(dev->id))
4418 cmd = ATA_CMD_FLUSH_EXT;
4419 else
4420 cmd = ATA_CMD_FLUSH;
4421
4422 return ata_do_simple_cmd(ap, dev, cmd);
4423 }
4424
4425 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4426 {
4427 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4428 }
4429
4430 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4431 {
4432 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4433 }
4434
4435 /**
4436 * ata_device_resume - wakeup a previously suspended devices
4437 * @ap: port the device is connected to
4438 * @dev: the device to resume
4439 *
4440 * Kick the drive back into action, by sending it an idle immediate
4441 * command and making sure its transfer mode matches between drive
4442 * and host.
4443 *
4444 */
4445 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4446 {
4447 if (ap->flags & ATA_FLAG_SUSPENDED) {
4448 ap->flags &= ~ATA_FLAG_SUSPENDED;
4449 ata_set_mode(ap);
4450 }
4451 if (!ata_dev_present(dev))
4452 return 0;
4453 if (dev->class == ATA_DEV_ATA)
4454 ata_start_drive(ap, dev);
4455
4456 return 0;
4457 }
4458
4459 /**
4460 * ata_device_suspend - prepare a device for suspend
4461 * @ap: port the device is connected to
4462 * @dev: the device to suspend
4463 *
4464 * Flush the cache on the drive, if appropriate, then issue a
4465 * standbynow command.
4466 */
4467 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4468 {
4469 if (!ata_dev_present(dev))
4470 return 0;
4471 if (dev->class == ATA_DEV_ATA)
4472 ata_flush_cache(ap, dev);
4473
4474 if (state.event != PM_EVENT_FREEZE)
4475 ata_standby_drive(ap, dev);
4476 ap->flags |= ATA_FLAG_SUSPENDED;
4477 return 0;
4478 }
4479
4480 /**
4481 * ata_port_start - Set port up for dma.
4482 * @ap: Port to initialize
4483 *
4484 * Called just after data structures for each port are
4485 * initialized. Allocates space for PRD table.
4486 *
4487 * May be used as the port_start() entry in ata_port_operations.
4488 *
4489 * LOCKING:
4490 * Inherited from caller.
4491 */
4492
4493 int ata_port_start (struct ata_port *ap)
4494 {
4495 struct device *dev = ap->dev;
4496 int rc;
4497
4498 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4499 if (!ap->prd)
4500 return -ENOMEM;
4501
4502 rc = ata_pad_alloc(ap, dev);
4503 if (rc) {
4504 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4505 return rc;
4506 }
4507
4508 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4509
4510 return 0;
4511 }
4512
4513
4514 /**
4515 * ata_port_stop - Undo ata_port_start()
4516 * @ap: Port to shut down
4517 *
4518 * Frees the PRD table.
4519 *
4520 * May be used as the port_stop() entry in ata_port_operations.
4521 *
4522 * LOCKING:
4523 * Inherited from caller.
4524 */
4525
4526 void ata_port_stop (struct ata_port *ap)
4527 {
4528 struct device *dev = ap->dev;
4529
4530 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4531 ata_pad_free(ap, dev);
4532 }
4533
4534 void ata_host_stop (struct ata_host_set *host_set)
4535 {
4536 if (host_set->mmio_base)
4537 iounmap(host_set->mmio_base);
4538 }
4539
4540
4541 /**
4542 * ata_host_remove - Unregister SCSI host structure with upper layers
4543 * @ap: Port to unregister
4544 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4545 *
4546 * LOCKING:
4547 * Inherited from caller.
4548 */
4549
4550 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4551 {
4552 struct Scsi_Host *sh = ap->host;
4553
4554 DPRINTK("ENTER\n");
4555
4556 if (do_unregister)
4557 scsi_remove_host(sh);
4558
4559 ap->ops->port_stop(ap);
4560 }
4561
4562 /**
4563 * ata_host_init - Initialize an ata_port structure
4564 * @ap: Structure to initialize
4565 * @host: associated SCSI mid-layer structure
4566 * @host_set: Collection of hosts to which @ap belongs
4567 * @ent: Probe information provided by low-level driver
4568 * @port_no: Port number associated with this ata_port
4569 *
4570 * Initialize a new ata_port structure, and its associated
4571 * scsi_host.
4572 *
4573 * LOCKING:
4574 * Inherited from caller.
4575 */
4576
4577 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4578 struct ata_host_set *host_set,
4579 const struct ata_probe_ent *ent, unsigned int port_no)
4580 {
4581 unsigned int i;
4582
4583 host->max_id = 16;
4584 host->max_lun = 1;
4585 host->max_channel = 1;
4586 host->unique_id = ata_unique_id++;
4587 host->max_cmd_len = 12;
4588
4589 ap->flags = ATA_FLAG_PORT_DISABLED;
4590 ap->id = host->unique_id;
4591 ap->host = host;
4592 ap->ctl = ATA_DEVCTL_OBS;
4593 ap->host_set = host_set;
4594 ap->dev = ent->dev;
4595 ap->port_no = port_no;
4596 ap->hard_port_no =
4597 ent->legacy_mode ? ent->hard_port_no : port_no;
4598 ap->pio_mask = ent->pio_mask;
4599 ap->mwdma_mask = ent->mwdma_mask;
4600 ap->udma_mask = ent->udma_mask;
4601 ap->flags |= ent->host_flags;
4602 ap->ops = ent->port_ops;
4603 ap->cbl = ATA_CBL_NONE;
4604 ap->active_tag = ATA_TAG_POISON;
4605 ap->last_ctl = 0xFF;
4606
4607 INIT_WORK(&ap->port_task, NULL, NULL);
4608 INIT_LIST_HEAD(&ap->eh_done_q);
4609
4610 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4611 struct ata_device *dev = &ap->device[i];
4612 dev->devno = i;
4613 dev->pio_mask = UINT_MAX;
4614 dev->mwdma_mask = UINT_MAX;
4615 dev->udma_mask = UINT_MAX;
4616 }
4617
4618 #ifdef ATA_IRQ_TRAP
4619 ap->stats.unhandled_irq = 1;
4620 ap->stats.idle_irq = 1;
4621 #endif
4622
4623 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4624 }
4625
4626 /**
4627 * ata_host_add - Attach low-level ATA driver to system
4628 * @ent: Information provided by low-level driver
4629 * @host_set: Collections of ports to which we add
4630 * @port_no: Port number associated with this host
4631 *
4632 * Attach low-level ATA driver to system.
4633 *
4634 * LOCKING:
4635 * PCI/etc. bus probe sem.
4636 *
4637 * RETURNS:
4638 * New ata_port on success, for NULL on error.
4639 */
4640
4641 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4642 struct ata_host_set *host_set,
4643 unsigned int port_no)
4644 {
4645 struct Scsi_Host *host;
4646 struct ata_port *ap;
4647 int rc;
4648
4649 DPRINTK("ENTER\n");
4650
4651 if (!ent->port_ops->probe_reset &&
4652 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4653 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4654 port_no);
4655 return NULL;
4656 }
4657
4658 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4659 if (!host)
4660 return NULL;
4661
4662 host->transportt = &ata_scsi_transport_template;
4663
4664 ap = (struct ata_port *) &host->hostdata[0];
4665
4666 ata_host_init(ap, host, host_set, ent, port_no);
4667
4668 rc = ap->ops->port_start(ap);
4669 if (rc)
4670 goto err_out;
4671
4672 return ap;
4673
4674 err_out:
4675 scsi_host_put(host);
4676 return NULL;
4677 }
4678
4679 /**
4680 * ata_device_add - Register hardware device with ATA and SCSI layers
4681 * @ent: Probe information describing hardware device to be registered
4682 *
4683 * This function processes the information provided in the probe
4684 * information struct @ent, allocates the necessary ATA and SCSI
4685 * host information structures, initializes them, and registers
4686 * everything with requisite kernel subsystems.
4687 *
4688 * This function requests irqs, probes the ATA bus, and probes
4689 * the SCSI bus.
4690 *
4691 * LOCKING:
4692 * PCI/etc. bus probe sem.
4693 *
4694 * RETURNS:
4695 * Number of ports registered. Zero on error (no ports registered).
4696 */
4697
4698 int ata_device_add(const struct ata_probe_ent *ent)
4699 {
4700 unsigned int count = 0, i;
4701 struct device *dev = ent->dev;
4702 struct ata_host_set *host_set;
4703
4704 DPRINTK("ENTER\n");
4705 /* alloc a container for our list of ATA ports (buses) */
4706 host_set = kzalloc(sizeof(struct ata_host_set) +
4707 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4708 if (!host_set)
4709 return 0;
4710 spin_lock_init(&host_set->lock);
4711
4712 host_set->dev = dev;
4713 host_set->n_ports = ent->n_ports;
4714 host_set->irq = ent->irq;
4715 host_set->mmio_base = ent->mmio_base;
4716 host_set->private_data = ent->private_data;
4717 host_set->ops = ent->port_ops;
4718 host_set->flags = ent->host_set_flags;
4719
4720 /* register each port bound to this device */
4721 for (i = 0; i < ent->n_ports; i++) {
4722 struct ata_port *ap;
4723 unsigned long xfer_mode_mask;
4724
4725 ap = ata_host_add(ent, host_set, i);
4726 if (!ap)
4727 goto err_out;
4728
4729 host_set->ports[i] = ap;
4730 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
4731 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4732 (ap->pio_mask << ATA_SHIFT_PIO);
4733
4734 /* print per-port info to dmesg */
4735 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4736 "bmdma 0x%lX irq %lu\n",
4737 ap->id,
4738 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4739 ata_mode_string(xfer_mode_mask),
4740 ap->ioaddr.cmd_addr,
4741 ap->ioaddr.ctl_addr,
4742 ap->ioaddr.bmdma_addr,
4743 ent->irq);
4744
4745 ata_chk_status(ap);
4746 host_set->ops->irq_clear(ap);
4747 count++;
4748 }
4749
4750 if (!count)
4751 goto err_free_ret;
4752
4753 /* obtain irq, that is shared between channels */
4754 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4755 DRV_NAME, host_set))
4756 goto err_out;
4757
4758 /* perform each probe synchronously */
4759 DPRINTK("probe begin\n");
4760 for (i = 0; i < count; i++) {
4761 struct ata_port *ap;
4762 int rc;
4763
4764 ap = host_set->ports[i];
4765
4766 DPRINTK("ata%u: bus probe begin\n", ap->id);
4767 rc = ata_bus_probe(ap);
4768 DPRINTK("ata%u: bus probe end\n", ap->id);
4769
4770 if (rc) {
4771 /* FIXME: do something useful here?
4772 * Current libata behavior will
4773 * tear down everything when
4774 * the module is removed
4775 * or the h/w is unplugged.
4776 */
4777 }
4778
4779 rc = scsi_add_host(ap->host, dev);
4780 if (rc) {
4781 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4782 ap->id);
4783 /* FIXME: do something useful here */
4784 /* FIXME: handle unconditional calls to
4785 * scsi_scan_host and ata_host_remove, below,
4786 * at the very least
4787 */
4788 }
4789 }
4790
4791 /* probes are done, now scan each port's disk(s) */
4792 DPRINTK("host probe begin\n");
4793 for (i = 0; i < count; i++) {
4794 struct ata_port *ap = host_set->ports[i];
4795
4796 ata_scsi_scan_host(ap);
4797 }
4798
4799 dev_set_drvdata(dev, host_set);
4800
4801 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4802 return ent->n_ports; /* success */
4803
4804 err_out:
4805 for (i = 0; i < count; i++) {
4806 ata_host_remove(host_set->ports[i], 1);
4807 scsi_host_put(host_set->ports[i]->host);
4808 }
4809 err_free_ret:
4810 kfree(host_set);
4811 VPRINTK("EXIT, returning 0\n");
4812 return 0;
4813 }
4814
4815 /**
4816 * ata_host_set_remove - PCI layer callback for device removal
4817 * @host_set: ATA host set that was removed
4818 *
4819 * Unregister all objects associated with this host set. Free those
4820 * objects.
4821 *
4822 * LOCKING:
4823 * Inherited from calling layer (may sleep).
4824 */
4825
4826 void ata_host_set_remove(struct ata_host_set *host_set)
4827 {
4828 struct ata_port *ap;
4829 unsigned int i;
4830
4831 for (i = 0; i < host_set->n_ports; i++) {
4832 ap = host_set->ports[i];
4833 scsi_remove_host(ap->host);
4834 }
4835
4836 free_irq(host_set->irq, host_set);
4837
4838 for (i = 0; i < host_set->n_ports; i++) {
4839 ap = host_set->ports[i];
4840
4841 ata_scsi_release(ap->host);
4842
4843 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4844 struct ata_ioports *ioaddr = &ap->ioaddr;
4845
4846 if (ioaddr->cmd_addr == 0x1f0)
4847 release_region(0x1f0, 8);
4848 else if (ioaddr->cmd_addr == 0x170)
4849 release_region(0x170, 8);
4850 }
4851
4852 scsi_host_put(ap->host);
4853 }
4854
4855 if (host_set->ops->host_stop)
4856 host_set->ops->host_stop(host_set);
4857
4858 kfree(host_set);
4859 }
4860
4861 /**
4862 * ata_scsi_release - SCSI layer callback hook for host unload
4863 * @host: libata host to be unloaded
4864 *
4865 * Performs all duties necessary to shut down a libata port...
4866 * Kill port kthread, disable port, and release resources.
4867 *
4868 * LOCKING:
4869 * Inherited from SCSI layer.
4870 *
4871 * RETURNS:
4872 * One.
4873 */
4874
4875 int ata_scsi_release(struct Scsi_Host *host)
4876 {
4877 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4878 int i;
4879
4880 DPRINTK("ENTER\n");
4881
4882 ap->ops->port_disable(ap);
4883 ata_host_remove(ap, 0);
4884 for (i = 0; i < ATA_MAX_DEVICES; i++)
4885 kfree(ap->device[i].id);
4886
4887 DPRINTK("EXIT\n");
4888 return 1;
4889 }
4890
4891 /**
4892 * ata_std_ports - initialize ioaddr with standard port offsets.
4893 * @ioaddr: IO address structure to be initialized
4894 *
4895 * Utility function which initializes data_addr, error_addr,
4896 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4897 * device_addr, status_addr, and command_addr to standard offsets
4898 * relative to cmd_addr.
4899 *
4900 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4901 */
4902
4903 void ata_std_ports(struct ata_ioports *ioaddr)
4904 {
4905 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4906 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4907 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4908 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4909 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4910 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4911 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4912 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4913 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4914 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4915 }
4916
4917
4918 #ifdef CONFIG_PCI
4919
4920 void ata_pci_host_stop (struct ata_host_set *host_set)
4921 {
4922 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4923
4924 pci_iounmap(pdev, host_set->mmio_base);
4925 }
4926
4927 /**
4928 * ata_pci_remove_one - PCI layer callback for device removal
4929 * @pdev: PCI device that was removed
4930 *
4931 * PCI layer indicates to libata via this hook that
4932 * hot-unplug or module unload event has occurred.
4933 * Handle this by unregistering all objects associated
4934 * with this PCI device. Free those objects. Then finally
4935 * release PCI resources and disable device.
4936 *
4937 * LOCKING:
4938 * Inherited from PCI layer (may sleep).
4939 */
4940
4941 void ata_pci_remove_one (struct pci_dev *pdev)
4942 {
4943 struct device *dev = pci_dev_to_dev(pdev);
4944 struct ata_host_set *host_set = dev_get_drvdata(dev);
4945
4946 ata_host_set_remove(host_set);
4947 pci_release_regions(pdev);
4948 pci_disable_device(pdev);
4949 dev_set_drvdata(dev, NULL);
4950 }
4951
4952 /* move to PCI subsystem */
4953 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4954 {
4955 unsigned long tmp = 0;
4956
4957 switch (bits->width) {
4958 case 1: {
4959 u8 tmp8 = 0;
4960 pci_read_config_byte(pdev, bits->reg, &tmp8);
4961 tmp = tmp8;
4962 break;
4963 }
4964 case 2: {
4965 u16 tmp16 = 0;
4966 pci_read_config_word(pdev, bits->reg, &tmp16);
4967 tmp = tmp16;
4968 break;
4969 }
4970 case 4: {
4971 u32 tmp32 = 0;
4972 pci_read_config_dword(pdev, bits->reg, &tmp32);
4973 tmp = tmp32;
4974 break;
4975 }
4976
4977 default:
4978 return -EINVAL;
4979 }
4980
4981 tmp &= bits->mask;
4982
4983 return (tmp == bits->val) ? 1 : 0;
4984 }
4985
4986 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4987 {
4988 pci_save_state(pdev);
4989 pci_disable_device(pdev);
4990 pci_set_power_state(pdev, PCI_D3hot);
4991 return 0;
4992 }
4993
4994 int ata_pci_device_resume(struct pci_dev *pdev)
4995 {
4996 pci_set_power_state(pdev, PCI_D0);
4997 pci_restore_state(pdev);
4998 pci_enable_device(pdev);
4999 pci_set_master(pdev);
5000 return 0;
5001 }
5002 #endif /* CONFIG_PCI */
5003
5004
5005 static int __init ata_init(void)
5006 {
5007 ata_wq = create_workqueue("ata");
5008 if (!ata_wq)
5009 return -ENOMEM;
5010
5011 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5012 return 0;
5013 }
5014
5015 static void __exit ata_exit(void)
5016 {
5017 destroy_workqueue(ata_wq);
5018 }
5019
5020 module_init(ata_init);
5021 module_exit(ata_exit);
5022
5023 static unsigned long ratelimit_time;
5024 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5025
5026 int ata_ratelimit(void)
5027 {
5028 int rc;
5029 unsigned long flags;
5030
5031 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5032
5033 if (time_after(jiffies, ratelimit_time)) {
5034 rc = 1;
5035 ratelimit_time = jiffies + (HZ/5);
5036 } else
5037 rc = 0;
5038
5039 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5040
5041 return rc;
5042 }
5043
5044 /*
5045 * libata is essentially a library of internal helper functions for
5046 * low-level ATA host controller drivers. As such, the API/ABI is
5047 * likely to change as new drivers are added and updated.
5048 * Do not depend on ABI/API stability.
5049 */
5050
5051 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5052 EXPORT_SYMBOL_GPL(ata_std_ports);
5053 EXPORT_SYMBOL_GPL(ata_device_add);
5054 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5055 EXPORT_SYMBOL_GPL(ata_sg_init);
5056 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5057 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5058 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5059 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5060 EXPORT_SYMBOL_GPL(ata_tf_load);
5061 EXPORT_SYMBOL_GPL(ata_tf_read);
5062 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5063 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5064 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5065 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5066 EXPORT_SYMBOL_GPL(ata_check_status);
5067 EXPORT_SYMBOL_GPL(ata_altstatus);
5068 EXPORT_SYMBOL_GPL(ata_exec_command);
5069 EXPORT_SYMBOL_GPL(ata_port_start);
5070 EXPORT_SYMBOL_GPL(ata_port_stop);
5071 EXPORT_SYMBOL_GPL(ata_host_stop);
5072 EXPORT_SYMBOL_GPL(ata_interrupt);
5073 EXPORT_SYMBOL_GPL(ata_qc_prep);
5074 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5075 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5076 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5077 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5078 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5079 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5080 EXPORT_SYMBOL_GPL(ata_port_probe);
5081 EXPORT_SYMBOL_GPL(sata_phy_reset);
5082 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5083 EXPORT_SYMBOL_GPL(ata_bus_reset);
5084 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5085 EXPORT_SYMBOL_GPL(ata_std_softreset);
5086 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5087 EXPORT_SYMBOL_GPL(ata_std_postreset);
5088 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5089 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5090 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5091 EXPORT_SYMBOL_GPL(ata_dev_classify);
5092 EXPORT_SYMBOL_GPL(ata_dev_pair);
5093 EXPORT_SYMBOL_GPL(ata_port_disable);
5094 EXPORT_SYMBOL_GPL(ata_ratelimit);
5095 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5096 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5097 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5098 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5099 EXPORT_SYMBOL_GPL(ata_scsi_error);
5100 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5101 EXPORT_SYMBOL_GPL(ata_scsi_release);
5102 EXPORT_SYMBOL_GPL(ata_host_intr);
5103 EXPORT_SYMBOL_GPL(ata_id_string);
5104 EXPORT_SYMBOL_GPL(ata_id_c_string);
5105 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5106 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5107 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5108
5109 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5110 EXPORT_SYMBOL_GPL(ata_timing_compute);
5111 EXPORT_SYMBOL_GPL(ata_timing_merge);
5112
5113 #ifdef CONFIG_PCI
5114 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5115 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5116 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5117 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5118 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5119 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5120 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5121 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5122 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5123 #endif /* CONFIG_PCI */
5124
5125 EXPORT_SYMBOL_GPL(ata_device_suspend);
5126 EXPORT_SYMBOL_GPL(ata_device_resume);
5127 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5128 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
This page took 0.18703 seconds and 6 git commands to generate.