[PATCH] libata-dev: Remove atapi_packet_task()
drivers/scsi/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
61
62 #include "libata.h"
63
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
68 struct ata_device *dev);
69 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
70 static void ata_pio_error(struct ata_port *ap);
71
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
74
75 int atapi_enabled = 1;
76 module_param(atapi_enabled, int, 0444);
77 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
78
79 int libata_fua = 0;
80 module_param_named(fua, libata_fua, int, 0444);
81 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
82
83 MODULE_AUTHOR("Jeff Garzik");
84 MODULE_DESCRIPTION("Library module for ATA devices");
85 MODULE_LICENSE("GPL");
86 MODULE_VERSION(DRV_VERSION);
87
88
89 /**
90 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
91 * @tf: Taskfile to convert
92  *	@fis: Buffer into which data will be output
93 * @pmp: Port multiplier port
94 *
95 * Converts a standard ATA taskfile to a Serial ATA
96 * FIS structure (Register - Host to Device).
97 *
98 * LOCKING:
99 * Inherited from caller.
100 */
101
102 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
103 {
104 fis[0] = 0x27; /* Register - Host to Device FIS */
105 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
106 bit 7 indicates Command FIS */
107 fis[2] = tf->command;
108 fis[3] = tf->feature;
109
110 fis[4] = tf->lbal;
111 fis[5] = tf->lbam;
112 fis[6] = tf->lbah;
113 fis[7] = tf->device;
114
115 fis[8] = tf->hob_lbal;
116 fis[9] = tf->hob_lbam;
117 fis[10] = tf->hob_lbah;
118 fis[11] = tf->hob_feature;
119
120 fis[12] = tf->nsect;
121 fis[13] = tf->hob_nsect;
122 fis[14] = 0;
123 fis[15] = tf->ctl;
124
125 fis[16] = 0;
126 fis[17] = 0;
127 fis[18] = 0;
128 fis[19] = 0;
129 }
130
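/* Example (illustrative sketch, not part of the original file): a SATA
 * LLDD that builds per-command FIS buffers, AHCI-style, might use this
 * helper roughly as below; "cmd_tbl_cfis" is a hypothetical name for the
 * driver's 20-byte command-FIS area.
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, fis, 0);		pmp 0; fis[1] bit 7 (C) gets set
 *	memcpy(cmd_tbl_cfis, fis, sizeof(fis));
 */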
131 /**
132 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
133 * @fis: Buffer from which data will be input
134 * @tf: Taskfile to output
135 *
136 * Converts a serial ATA FIS structure to a standard ATA taskfile.
137 *
138 * LOCKING:
139 * Inherited from caller.
140 */
141
142 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
143 {
144 tf->command = fis[2]; /* status */
145 tf->feature = fis[3]; /* error */
146
147 tf->lbal = fis[4];
148 tf->lbam = fis[5];
149 tf->lbah = fis[6];
150 tf->device = fis[7];
151
152 tf->hob_lbal = fis[8];
153 tf->hob_lbam = fis[9];
154 tf->hob_lbah = fis[10];
155
156 tf->nsect = fis[12];
157 tf->hob_nsect = fis[13];
158 }
159
160 static const u8 ata_rw_cmds[] = {
161 /* pio multi */
162 ATA_CMD_READ_MULTI,
163 ATA_CMD_WRITE_MULTI,
164 ATA_CMD_READ_MULTI_EXT,
165 ATA_CMD_WRITE_MULTI_EXT,
166 0,
167 0,
168 0,
169 ATA_CMD_WRITE_MULTI_FUA_EXT,
170 /* pio */
171 ATA_CMD_PIO_READ,
172 ATA_CMD_PIO_WRITE,
173 ATA_CMD_PIO_READ_EXT,
174 ATA_CMD_PIO_WRITE_EXT,
175 0,
176 0,
177 0,
178 0,
179 /* dma */
180 ATA_CMD_READ,
181 ATA_CMD_WRITE,
182 ATA_CMD_READ_EXT,
183 ATA_CMD_WRITE_EXT,
184 0,
185 0,
186 0,
187 ATA_CMD_WRITE_FUA_EXT
188 };
189
190 /**
191 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
192 * @qc: command to examine and configure
193 *
194 * Examine the device configuration and tf->flags to calculate
195 * the proper read/write commands and protocol to use.
196 *
197 * LOCKING:
198 * caller.
199 */
200 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
201 {
202 struct ata_taskfile *tf = &qc->tf;
203 struct ata_device *dev = qc->dev;
204 u8 cmd;
205
206 int index, fua, lba48, write;
207
208 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
209 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
210 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
211
212 if (dev->flags & ATA_DFLAG_PIO) {
213 tf->protocol = ATA_PROT_PIO;
214 index = dev->multi_count ? 0 : 8;
215 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
216 /* Unable to use DMA due to host limitation */
217 tf->protocol = ATA_PROT_PIO;
218 index = dev->multi_count ? 0 : 8;
219 } else {
220 tf->protocol = ATA_PROT_DMA;
221 index = 16;
222 }
223
224 cmd = ata_rw_cmds[index + fua + lba48 + write];
225 if (cmd) {
226 tf->command = cmd;
227 return 0;
228 }
229 return -1;
230 }
231
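/* Worked examples of the indexing above (derived directly from the
 * ata_rw_cmds[] table, not new behaviour): an LBA48 FUA DMA write gives
 * index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT; a plain LBA28 DMA read gives 16 + 0 + 0 + 0 = 16,
 * i.e. ATA_CMD_READ; a PIO read on a device without multi_count gives
 * 8 + 0 = 8, i.e. ATA_CMD_PIO_READ.
 */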
232 /**
233 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
234 * @pio_mask: pio_mask
235 * @mwdma_mask: mwdma_mask
236 * @udma_mask: udma_mask
237 *
238 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
239 * unsigned int xfer_mask.
240 *
241 * LOCKING:
242 * None.
243 *
244 * RETURNS:
245 * Packed xfer_mask.
246 */
247 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
248 unsigned int mwdma_mask,
249 unsigned int udma_mask)
250 {
251 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
252 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
253 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
254 }
255
256 /**
257 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
258 * @xfer_mask: xfer_mask to unpack
259 * @pio_mask: resulting pio_mask
260 * @mwdma_mask: resulting mwdma_mask
261 * @udma_mask: resulting udma_mask
262 *
263 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
264  *	Any NULL destination masks will be ignored.
265 */
266 static void ata_unpack_xfermask(unsigned int xfer_mask,
267 unsigned int *pio_mask,
268 unsigned int *mwdma_mask,
269 unsigned int *udma_mask)
270 {
271 if (pio_mask)
272 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
273 if (mwdma_mask)
274 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
275 if (udma_mask)
276 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
277 }
278
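/* Example (illustrative): the two helpers above are exact inverses as long
 * as each input mask fits in its bit field, so a round trip preserves the
 * three masks:
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);	PIO0-4, MWDMA0-2, UDMA0-5
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	now pio == 0x1f, mwdma == 0x07 and udma == 0x3f again
 */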
279 static const struct ata_xfer_ent {
280 unsigned int shift, bits;
281 u8 base;
282 } ata_xfer_tbl[] = {
283 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
284 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
285 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
286 { -1, },
287 };
288
289 /**
290 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
291 * @xfer_mask: xfer_mask of interest
292 *
293 * Return matching XFER_* value for @xfer_mask. Only the highest
294 * bit of @xfer_mask is considered.
295 *
296 * LOCKING:
297 * None.
298 *
299 * RETURNS:
300 * Matching XFER_* value, 0 if no match found.
301 */
302 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
303 {
304 int highbit = fls(xfer_mask) - 1;
305 const struct ata_xfer_ent *ent;
306
307 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
308 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
309 return ent->base + highbit - ent->shift;
310 return 0;
311 }
312
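/* Examples (derived from the table above): only the highest set bit of the
 * mask matters, so
 *
 *	ata_xfer_mask2mode(ata_pack_xfermask(0x1f, 0, 0))  == XFER_PIO_4
 *	ata_xfer_mask2mode(ata_pack_xfermask(0, 0, 0x3f))  == XFER_UDMA_5
 *	ata_xfer_mask2mode(0)                               == 0	(no match)
 */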
313 /**
314 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
315 * @xfer_mode: XFER_* of interest
316 *
317 * Return matching xfer_mask for @xfer_mode.
318 *
319 * LOCKING:
320 * None.
321 *
322 * RETURNS:
323 * Matching xfer_mask, 0 if no match found.
324 */
325 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
326 {
327 const struct ata_xfer_ent *ent;
328
329 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
330 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
331 return 1 << (ent->shift + xfer_mode - ent->base);
332 return 0;
333 }
334
335 /**
336 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
337 * @xfer_mode: XFER_* of interest
338 *
339 * Return matching xfer_shift for @xfer_mode.
340 *
341 * LOCKING:
342 * None.
343 *
344 * RETURNS:
345 * Matching xfer_shift, -1 if no match found.
346 */
347 static int ata_xfer_mode2shift(unsigned int xfer_mode)
348 {
349 const struct ata_xfer_ent *ent;
350
351 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
352 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
353 return ent->shift;
354 return -1;
355 }
356
357 /**
358 * ata_mode_string - convert xfer_mask to string
359 * @xfer_mask: mask of bits supported; only highest bit counts.
360 *
361 * Determine string which represents the highest speed
362  *	(highest bit in @xfer_mask).
363 *
364 * LOCKING:
365 * None.
366 *
367 * RETURNS:
368 * Constant C string representing highest speed listed in
369  *	@xfer_mask, or the constant C string "<n/a>".
370 */
371 static const char *ata_mode_string(unsigned int xfer_mask)
372 {
373 static const char * const xfer_mode_str[] = {
374 "PIO0",
375 "PIO1",
376 "PIO2",
377 "PIO3",
378 "PIO4",
379 "MWDMA0",
380 "MWDMA1",
381 "MWDMA2",
382 "UDMA/16",
383 "UDMA/25",
384 "UDMA/33",
385 "UDMA/44",
386 "UDMA/66",
387 "UDMA/100",
388 "UDMA/133",
389 "UDMA7",
390 };
391 int highbit;
392
393 highbit = fls(xfer_mask) - 1;
394 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
395 return xfer_mode_str[highbit];
396 return "<n/a>";
397 }
398
399 static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
400 {
401 if (ata_dev_present(dev)) {
402 printk(KERN_WARNING "ata%u: dev %u disabled\n",
403 ap->id, dev->devno);
404 dev->class++;
405 }
406 }
407
408 /**
409 * ata_pio_devchk - PATA device presence detection
410 * @ap: ATA channel to examine
411 * @device: Device to examine (starting at zero)
412 *
413 * This technique was originally described in
414 * Hale Landis's ATADRVR (www.ata-atapi.com), and
415 * later found its way into the ATA/ATAPI spec.
416 *
417 * Write a pattern to the ATA shadow registers,
418 * and if a device is present, it will respond by
419 * correctly storing and echoing back the
420 * ATA shadow register contents.
421 *
422 * LOCKING:
423 * caller.
424 */
425
426 static unsigned int ata_pio_devchk(struct ata_port *ap,
427 unsigned int device)
428 {
429 struct ata_ioports *ioaddr = &ap->ioaddr;
430 u8 nsect, lbal;
431
432 ap->ops->dev_select(ap, device);
433
434 outb(0x55, ioaddr->nsect_addr);
435 outb(0xaa, ioaddr->lbal_addr);
436
437 outb(0xaa, ioaddr->nsect_addr);
438 outb(0x55, ioaddr->lbal_addr);
439
440 outb(0x55, ioaddr->nsect_addr);
441 outb(0xaa, ioaddr->lbal_addr);
442
443 nsect = inb(ioaddr->nsect_addr);
444 lbal = inb(ioaddr->lbal_addr);
445
446 if ((nsect == 0x55) && (lbal == 0xaa))
447 return 1; /* we found a device */
448
449 return 0; /* nothing found */
450 }
451
452 /**
453 * ata_mmio_devchk - PATA device presence detection
454 * @ap: ATA channel to examine
455 * @device: Device to examine (starting at zero)
456 *
457 * This technique was originally described in
458 * Hale Landis's ATADRVR (www.ata-atapi.com), and
459 * later found its way into the ATA/ATAPI spec.
460 *
461 * Write a pattern to the ATA shadow registers,
462 * and if a device is present, it will respond by
463 * correctly storing and echoing back the
464 * ATA shadow register contents.
465 *
466 * LOCKING:
467 * caller.
468 */
469
470 static unsigned int ata_mmio_devchk(struct ata_port *ap,
471 unsigned int device)
472 {
473 struct ata_ioports *ioaddr = &ap->ioaddr;
474 u8 nsect, lbal;
475
476 ap->ops->dev_select(ap, device);
477
478 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
479 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
480
481 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
482 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
483
484 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
485 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
486
487 nsect = readb((void __iomem *) ioaddr->nsect_addr);
488 lbal = readb((void __iomem *) ioaddr->lbal_addr);
489
490 if ((nsect == 0x55) && (lbal == 0xaa))
491 return 1; /* we found a device */
492
493 return 0; /* nothing found */
494 }
495
496 /**
497 * ata_devchk - PATA device presence detection
498 * @ap: ATA channel to examine
499 * @device: Device to examine (starting at zero)
500 *
501 * Dispatch ATA device presence detection, depending
502 * on whether we are using PIO or MMIO to talk to the
503 * ATA shadow registers.
504 *
505 * LOCKING:
506 * caller.
507 */
508
509 static unsigned int ata_devchk(struct ata_port *ap,
510 unsigned int device)
511 {
512 if (ap->flags & ATA_FLAG_MMIO)
513 return ata_mmio_devchk(ap, device);
514 return ata_pio_devchk(ap, device);
515 }
516
517 /**
518 * ata_dev_classify - determine device type based on ATA-spec signature
519 * @tf: ATA taskfile register set for device to be identified
520 *
521 * Determine from taskfile register contents whether a device is
522 * ATA or ATAPI, as per "Signature and persistence" section
523 * of ATA/PI spec (volume 1, sect 5.14).
524 *
525 * LOCKING:
526 * None.
527 *
528 * RETURNS:
529  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
530  *	in the event of failure.
531 */
532
533 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
534 {
535 /* Apple's open source Darwin code hints that some devices only
536  *	 put a proper signature into the LBA mid/high registers,
537  *	 so we only check those.  It's sufficient for uniqueness.
538 */
539
540 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
541 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
542 DPRINTK("found ATA device by sig\n");
543 return ATA_DEV_ATA;
544 }
545
546 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
547 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
548 DPRINTK("found ATAPI device by sig\n");
549 return ATA_DEV_ATAPI;
550 }
551
552 DPRINTK("unknown device\n");
553 return ATA_DEV_UNKNOWN;
554 }
555
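/* Example mappings (restating the checks above, for quick reference):
 *
 *	tf->lbam == 0x00 && tf->lbah == 0x00	->  ATA_DEV_ATA
 *	tf->lbam == 0x3c && tf->lbah == 0xc3	->  ATA_DEV_ATA
 *	tf->lbam == 0x14 && tf->lbah == 0xeb	->  ATA_DEV_ATAPI
 *	tf->lbam == 0x69 && tf->lbah == 0x96	->  ATA_DEV_ATAPI
 *	anything else				->  ATA_DEV_UNKNOWN
 */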
556 /**
557 * ata_dev_try_classify - Parse returned ATA device signature
558 * @ap: ATA channel to examine
559 * @device: Device to examine (starting at zero)
560 * @r_err: Value of error register on completion
561 *
562 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
563 * an ATA/ATAPI-defined set of values is placed in the ATA
564 * shadow registers, indicating the results of device detection
565 * and diagnostics.
566 *
567 * Select the ATA device, and read the values from the ATA shadow
568 * registers. Then parse according to the Error register value,
569 * and the spec-defined values examined by ata_dev_classify().
570 *
571 * LOCKING:
572 * caller.
573 *
574 * RETURNS:
575 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
576 */
577
578 static unsigned int
579 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
580 {
581 struct ata_taskfile tf;
582 unsigned int class;
583 u8 err;
584
585 ap->ops->dev_select(ap, device);
586
587 memset(&tf, 0, sizeof(tf));
588
589 ap->ops->tf_read(ap, &tf);
590 err = tf.feature;
591 if (r_err)
592 *r_err = err;
593
594 /* see if device passed diags */
595 if (err == 1)
596 /* do nothing */ ;
597 else if ((device == 0) && (err == 0x81))
598 /* do nothing */ ;
599 else
600 return ATA_DEV_NONE;
601
602 /* determine if device is ATA or ATAPI */
603 class = ata_dev_classify(&tf);
604
605 if (class == ATA_DEV_UNKNOWN)
606 return ATA_DEV_NONE;
607 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
608 return ATA_DEV_NONE;
609 return class;
610 }
611
612 /**
613 * ata_id_string - Convert IDENTIFY DEVICE page into string
614 * @id: IDENTIFY DEVICE results we will examine
615 * @s: string into which data is output
616 * @ofs: offset into identify device page
617 * @len: length of string to return. must be an even number.
618 *
619 * The strings in the IDENTIFY DEVICE page are broken up into
620 * 16-bit chunks. Run through the string, and output each
621 * 8-bit chunk linearly, regardless of platform.
622 *
623 * LOCKING:
624 * caller.
625 */
626
627 void ata_id_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
629 {
630 unsigned int c;
631
632 while (len > 0) {
633 c = id[ofs] >> 8;
634 *s = c;
635 s++;
636
637 c = id[ofs] & 0xff;
638 *s = c;
639 s++;
640
641 ofs++;
642 len -= 2;
643 }
644 }
645
646 /**
647 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
648 * @id: IDENTIFY DEVICE results we will examine
649 * @s: string into which data is output
650 * @ofs: offset into identify device page
651 * @len: length of string to return. must be an odd number.
652 *
653 * This function is identical to ata_id_string except that it
654 * trims trailing spaces and terminates the resulting string with
655 * null. @len must be actual maximum length (even number) + 1.
656 *
657 * LOCKING:
658 * caller.
659 */
660 void ata_id_c_string(const u16 *id, unsigned char *s,
661 unsigned int ofs, unsigned int len)
662 {
663 unsigned char *p;
664
665 WARN_ON(!(len & 1));
666
667 ata_id_string(id, s, ofs, len - 1);
668
669 p = s + strnlen(s, len - 1);
670 while (p > s && p[-1] == ' ')
671 p--;
672 *p = '\0';
673 }
674
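/* Example (illustrative): extracting the 40-character model string that the
 * ATA spec keeps in IDENTIFY words 27-46, trimmed and NUL-terminated:
 *
 *	unsigned char model[41];	40 chars + NUL, so the length is odd
 *
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 */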
675 static u64 ata_id_n_sectors(const u16 *id)
676 {
677 if (ata_id_has_lba(id)) {
678 if (ata_id_has_lba48(id))
679 return ata_id_u64(id, 100);
680 else
681 return ata_id_u32(id, 60);
682 } else {
683 if (ata_id_current_chs_valid(id))
684 return ata_id_u32(id, 57);
685 else
686 return id[1] * id[3] * id[6];
687 }
688 }
689
690 /**
691 * ata_noop_dev_select - Select device 0/1 on ATA bus
692 * @ap: ATA channel to manipulate
693 * @device: ATA device (numbered from zero) to select
694 *
695  *	This function performs no operation; it is a no-op hook.
696 *
697 * May be used as the dev_select() entry in ata_port_operations.
698 *
699 * LOCKING:
700 * caller.
701 */
702 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
703 {
704 }
705
706
707 /**
708 * ata_std_dev_select - Select device 0/1 on ATA bus
709 * @ap: ATA channel to manipulate
710 * @device: ATA device (numbered from zero) to select
711 *
712 * Use the method defined in the ATA specification to
713 * make either device 0, or device 1, active on the
714 * ATA channel. Works with both PIO and MMIO.
715 *
716 * May be used as the dev_select() entry in ata_port_operations.
717 *
718 * LOCKING:
719 * caller.
720 */
721
722 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
723 {
724 u8 tmp;
725
726 if (device == 0)
727 tmp = ATA_DEVICE_OBS;
728 else
729 tmp = ATA_DEVICE_OBS | ATA_DEV1;
730
731 if (ap->flags & ATA_FLAG_MMIO) {
732 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
733 } else {
734 outb(tmp, ap->ioaddr.device_addr);
735 }
736 ata_pause(ap); /* needed; also flushes, for mmio */
737 }
738
739 /**
740 * ata_dev_select - Select device 0/1 on ATA bus
741 * @ap: ATA channel to manipulate
742 * @device: ATA device (numbered from zero) to select
743 * @wait: non-zero to wait for Status register BSY bit to clear
744 * @can_sleep: non-zero if context allows sleeping
745 *
746 * Use the method defined in the ATA specification to
747 * make either device 0, or device 1, active on the
748 * ATA channel.
749 *
750 * This is a high-level version of ata_std_dev_select(),
751 * which additionally provides the services of inserting
752 * the proper pauses and status polling, where needed.
753 *
754 * LOCKING:
755 * caller.
756 */
757
758 void ata_dev_select(struct ata_port *ap, unsigned int device,
759 unsigned int wait, unsigned int can_sleep)
760 {
761 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
762 ap->id, device, wait);
763
764 if (wait)
765 ata_wait_idle(ap);
766
767 ap->ops->dev_select(ap, device);
768
769 if (wait) {
770 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
771 msleep(150);
772 ata_wait_idle(ap);
773 }
774 }
775
776 /**
777 * ata_dump_id - IDENTIFY DEVICE info debugging output
778 * @id: IDENTIFY DEVICE page to dump
779 *
780 * Dump selected 16-bit words from the given IDENTIFY DEVICE
781 * page.
782 *
783 * LOCKING:
784 * caller.
785 */
786
787 static inline void ata_dump_id(const u16 *id)
788 {
789 DPRINTK("49==0x%04x "
790 "53==0x%04x "
791 "63==0x%04x "
792 "64==0x%04x "
793 "75==0x%04x \n",
794 id[49],
795 id[53],
796 id[63],
797 id[64],
798 id[75]);
799 DPRINTK("80==0x%04x "
800 "81==0x%04x "
801 "82==0x%04x "
802 "83==0x%04x "
803 "84==0x%04x \n",
804 id[80],
805 id[81],
806 id[82],
807 id[83],
808 id[84]);
809 DPRINTK("88==0x%04x "
810 "93==0x%04x\n",
811 id[88],
812 id[93]);
813 }
814
815 /**
816 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
817 * @id: IDENTIFY data to compute xfer mask from
818 *
819 * Compute the xfermask for this device. This is not as trivial
820 * as it seems if we must consider early devices correctly.
821 *
822 * FIXME: pre IDE drive timing (do we care ?).
823 *
824 * LOCKING:
825 * None.
826 *
827 * RETURNS:
828 * Computed xfermask
829 */
830 static unsigned int ata_id_xfermask(const u16 *id)
831 {
832 unsigned int pio_mask, mwdma_mask, udma_mask;
833
834 /* Usual case. Word 53 indicates word 64 is valid */
835 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
836 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
837 pio_mask <<= 3;
838 pio_mask |= 0x7;
839 } else {
840 /* If word 64 isn't valid then Word 51 high byte holds
841 * the PIO timing number for the maximum. Turn it into
842 * a mask.
843 */
844 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
845
846 /* But wait.. there's more. Design your standards by
847 * committee and you too can get a free iordy field to
848  *	   process.  However, it's the speeds, not the modes, that
849 * are supported... Note drivers using the timing API
850 * will get this right anyway
851 */
852 }
853
854 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
855
856 udma_mask = 0;
857 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
858 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
859
860 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
861 }
862
863 /**
864 * ata_port_queue_task - Queue port_task
865  *	@ap: The ata_port to queue port_task for
 *	@fn: function to be scheduled via port_task
 *	@data: argument passed to @fn
 *	@delay: delay, in jiffies, before running @fn (0 means immediately)
866  *
867 * Schedule @fn(@data) for execution after @delay jiffies using
868  *	port_task.  There is one port_task per port, and it is the
869  *	user's (i.e. the low-level driver's) responsibility to make sure that only
870 * one task is active at any given time.
871 *
872 * libata core layer takes care of synchronization between
873 * port_task and EH. ata_port_queue_task() may be ignored for EH
874 * synchronization.
875 *
876 * LOCKING:
877 * Inherited from caller.
878 */
879 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
880 unsigned long delay)
881 {
882 int rc;
883
884 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
885 return;
886
887 PREPARE_WORK(&ap->port_task, fn, data);
888
889 if (!delay)
890 rc = queue_work(ata_wq, &ap->port_task);
891 else
892 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
893
894 /* rc == 0 means that another user is using port task */
895 WARN_ON(rc == 0);
896 }
897
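/* Example usage (illustrative sketch): the PIO state machine in this file
 * reschedules itself through port_task along these lines, assuming a work
 * function with the void (*fn)(void *) signature such as ata_pio_task():
 *
 *	ata_port_queue_task(ap, ata_pio_task, ap, 0);		run as soon as possible
 *	ata_port_queue_task(ap, ata_pio_task, ap, HZ / 10);	poll again in ~100 ms
 */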
898 /**
899 * ata_port_flush_task - Flush port_task
900 * @ap: The ata_port to flush port_task for
901 *
902  *	After this function completes, port_task is guaranteed not to
903 * be running or scheduled.
904 *
905 * LOCKING:
906 * Kernel thread context (may sleep)
907 */
908 void ata_port_flush_task(struct ata_port *ap)
909 {
910 unsigned long flags;
911
912 DPRINTK("ENTER\n");
913
914 spin_lock_irqsave(&ap->host_set->lock, flags);
915 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
916 spin_unlock_irqrestore(&ap->host_set->lock, flags);
917
918 DPRINTK("flush #1\n");
919 flush_workqueue(ata_wq);
920
921 /*
922 * At this point, if a task is running, it's guaranteed to see
923 * the FLUSH flag; thus, it will never queue pio tasks again.
924 * Cancel and flush.
925 */
926 if (!cancel_delayed_work(&ap->port_task)) {
927 DPRINTK("flush #2\n");
928 flush_workqueue(ata_wq);
929 }
930
931 spin_lock_irqsave(&ap->host_set->lock, flags);
932 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
933 spin_unlock_irqrestore(&ap->host_set->lock, flags);
934
935 DPRINTK("EXIT\n");
936 }
937
938 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
939 {
940 struct completion *waiting = qc->private_data;
941
942 qc->ap->ops->tf_read(qc->ap, &qc->tf);
943 complete(waiting);
944 }
945
946 /**
947 * ata_exec_internal - execute libata internal command
948 * @ap: Port to which the command is sent
949 * @dev: Device to which the command is sent
950 * @tf: Taskfile registers for the command and the result
951  *	@dma_dir: Data transfer direction of the command
952 * @buf: Data buffer of the command
953 * @buflen: Length of data buffer
954 *
955 * Executes libata internal command with timeout. @tf contains
956 * command on entry and result on return. Timeout and error
957 * conditions are reported via return value. No recovery action
958 * is taken after a command times out. It's caller's duty to
959 * clean up after timeout.
960 *
961 * LOCKING:
962 * None. Should be called with kernel context, might sleep.
963 */
964
965 static unsigned
966 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
967 struct ata_taskfile *tf,
968 int dma_dir, void *buf, unsigned int buflen)
969 {
970 u8 command = tf->command;
971 struct ata_queued_cmd *qc;
972 DECLARE_COMPLETION(wait);
973 unsigned long flags;
974 unsigned int err_mask;
975
976 spin_lock_irqsave(&ap->host_set->lock, flags);
977
978 qc = ata_qc_new_init(ap, dev);
979 BUG_ON(qc == NULL);
980
981 qc->tf = *tf;
982 qc->dma_dir = dma_dir;
983 if (dma_dir != DMA_NONE) {
984 ata_sg_init_one(qc, buf, buflen);
985 qc->nsect = buflen / ATA_SECT_SIZE;
986 }
987
988 qc->private_data = &wait;
989 qc->complete_fn = ata_qc_complete_internal;
990
991 qc->err_mask = ata_qc_issue(qc);
992 if (qc->err_mask)
993 ata_qc_complete(qc);
994
995 spin_unlock_irqrestore(&ap->host_set->lock, flags);
996
997 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
998 ata_port_flush_task(ap);
999
1000 spin_lock_irqsave(&ap->host_set->lock, flags);
1001
1002 /* We're racing with irq here. If we lose, the
1003 * following test prevents us from completing the qc
1004 * again. If completion irq occurs after here but
1005 * before the caller cleans up, it will result in a
1006 * spurious interrupt. We can live with that.
1007 */
1008 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1009 qc->err_mask = AC_ERR_TIMEOUT;
1010 ata_qc_complete(qc);
1011 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1012 ap->id, command);
1013 }
1014
1015 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1016 }
1017
1018 *tf = qc->tf;
1019 err_mask = qc->err_mask;
1020
1021 ata_qc_free(qc);
1022
1023 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1024 * Until those drivers are fixed, we detect the condition
1025 * here, fail the command with AC_ERR_SYSTEM and reenable the
1026 * port.
1027 *
1028 * Note that this doesn't change any behavior as internal
1029 * command failure results in disabling the device in the
1030 * higher layer for LLDDs without new reset/EH callbacks.
1031 *
1032 * Kill the following code as soon as those drivers are fixed.
1033 */
1034 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1035 err_mask |= AC_ERR_SYSTEM;
1036 ata_port_probe(ap);
1037 }
1038
1039 return err_mask;
1040 }
1041
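/* Illustrative sketch (not part of the original file): a non-data
 * SET FEATURES - XFER MODE command issued through ata_exec_internal(),
 * roughly what ata_dev_set_xfermode() does later in this file:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(ap, &tf, dev->devno);
 *	tf.command  = ATA_CMD_SET_FEATURES;
 *	tf.feature  = SETFEATURES_XFER;
 *	tf.nsect    = dev->xfer_mode;
 *	tf.flags   |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *
 *	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
 */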
1042 /**
1043 * ata_pio_need_iordy - check if iordy needed
1044 * @adev: ATA device
1045 *
1046 * Check if the current speed of the device requires IORDY. Used
1047 * by various controllers for chip configuration.
1048 */
1049
1050 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1051 {
1052 int pio;
1053 int speed = adev->pio_mode - XFER_PIO_0;
1054
1055 if (speed < 2)
1056 return 0;
1057 if (speed > 2)
1058 return 1;
1059
1060 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1061
1062 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1063 pio = adev->id[ATA_ID_EIDE_PIO];
1064 /* Is the speed faster than the drive allows non IORDY ? */
1065 if (pio) {
1066 /* This is cycle times not frequency - watch the logic! */
1067 if (pio > 240) /* PIO2 is 240nS per cycle */
1068 return 1;
1069 return 0;
1070 }
1071 }
1072 return 0;
1073 }
1074
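/* Worked example of the PIO2 borderline case above: a drive that reports a
 * minimum non-IORDY cycle time of 383 ns cannot meet PIO2's 240 ns cycle
 * without IORDY, so the function returns 1; a drive reporting 180 ns (or
 * one without valid EIDE timing words) returns 0.  Modes above PIO2 always
 * need IORDY here, modes below never do.
 */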
1075 /**
1076 * ata_dev_read_id - Read ID data from the specified device
1077 * @ap: port on which target device resides
1078 * @dev: target device
1079 * @p_class: pointer to class of the target device (may be changed)
1080 * @post_reset: is this read ID post-reset?
1081 * @p_id: read IDENTIFY page (newly allocated)
1082 *
1083 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1084 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1085 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1086 * for pre-ATA4 drives.
1087 *
1088 * LOCKING:
1089 * Kernel thread context (may sleep)
1090 *
1091 * RETURNS:
1092 * 0 on success, -errno otherwise.
1093 */
1094 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1095 unsigned int *p_class, int post_reset, u16 **p_id)
1096 {
1097 unsigned int class = *p_class;
1098 struct ata_taskfile tf;
1099 unsigned int err_mask = 0;
1100 u16 *id;
1101 const char *reason;
1102 int rc;
1103
1104 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1105
1106 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1107
1108 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1109 if (id == NULL) {
1110 rc = -ENOMEM;
1111 reason = "out of memory";
1112 goto err_out;
1113 }
1114
1115 retry:
1116 ata_tf_init(ap, &tf, dev->devno);
1117
1118 switch (class) {
1119 case ATA_DEV_ATA:
1120 tf.command = ATA_CMD_ID_ATA;
1121 break;
1122 case ATA_DEV_ATAPI:
1123 tf.command = ATA_CMD_ID_ATAPI;
1124 break;
1125 default:
1126 rc = -ENODEV;
1127 reason = "unsupported class";
1128 goto err_out;
1129 }
1130
1131 tf.protocol = ATA_PROT_PIO;
1132
1133 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1134 id, sizeof(id[0]) * ATA_ID_WORDS);
1135 if (err_mask) {
1136 rc = -EIO;
1137 reason = "I/O error";
1138 goto err_out;
1139 }
1140
1141 swap_buf_le16(id, ATA_ID_WORDS);
1142
1143 /* sanity check */
1144 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1145 rc = -EINVAL;
1146 reason = "device reports illegal type";
1147 goto err_out;
1148 }
1149
1150 if (post_reset && class == ATA_DEV_ATA) {
1151 /*
1152 * The exact sequence expected by certain pre-ATA4 drives is:
1153 * SRST RESET
1154 * IDENTIFY
1155 * INITIALIZE DEVICE PARAMETERS
1156 * anything else..
1157 * Some drives were very specific about that exact sequence.
1158 */
1159 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1160 err_mask = ata_dev_init_params(ap, dev);
1161 if (err_mask) {
1162 rc = -EIO;
1163 reason = "INIT_DEV_PARAMS failed";
1164 goto err_out;
1165 }
1166
1167 /* current CHS translation info (id[53-58]) might be
1168 * changed. reread the identify device info.
1169 */
1170 post_reset = 0;
1171 goto retry;
1172 }
1173 }
1174
1175 *p_class = class;
1176 *p_id = id;
1177 return 0;
1178
1179 err_out:
1180 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1181 ap->id, dev->devno, reason);
1182 kfree(id);
1183 return rc;
1184 }
1185
1186 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1187 struct ata_device *dev)
1188 {
1189 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1190 }
1191
1192 /**
1193 * ata_dev_configure - Configure the specified ATA/ATAPI device
1194 * @ap: Port on which target device resides
1195 * @dev: Target device to configure
1196 * @print_info: Enable device info printout
1197 *
1198 * Configure @dev according to @dev->id. Generic and low-level
1199 * driver specific fixups are also applied.
1200 *
1201 * LOCKING:
1202 * Kernel thread context (may sleep)
1203 *
1204 * RETURNS:
1205 * 0 on success, -errno otherwise
1206 */
1207 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1208 int print_info)
1209 {
1210 const u16 *id = dev->id;
1211 unsigned int xfer_mask;
1212 int i, rc;
1213
1214 if (!ata_dev_present(dev)) {
1215 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1216 ap->id, dev->devno);
1217 return 0;
1218 }
1219
1220 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1221
1222 /* print device capabilities */
1223 if (print_info)
1224 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1225 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1226 ap->id, dev->devno, id[49], id[82], id[83],
1227 id[84], id[85], id[86], id[87], id[88]);
1228
1229 /* initialize to-be-configured parameters */
1230 dev->flags = 0;
1231 dev->max_sectors = 0;
1232 dev->cdb_len = 0;
1233 dev->n_sectors = 0;
1234 dev->cylinders = 0;
1235 dev->heads = 0;
1236 dev->sectors = 0;
1237
1238 /*
1239 * common ATA, ATAPI feature tests
1240 */
1241
1242 /* find max transfer mode; for printk only */
1243 xfer_mask = ata_id_xfermask(id);
1244
1245 ata_dump_id(id);
1246
1247 /* ATA-specific feature tests */
1248 if (dev->class == ATA_DEV_ATA) {
1249 dev->n_sectors = ata_id_n_sectors(id);
1250
1251 if (ata_id_has_lba(id)) {
1252 const char *lba_desc;
1253
1254 lba_desc = "LBA";
1255 dev->flags |= ATA_DFLAG_LBA;
1256 if (ata_id_has_lba48(id)) {
1257 dev->flags |= ATA_DFLAG_LBA48;
1258 lba_desc = "LBA48";
1259 }
1260
1261 /* print device info to dmesg */
1262 if (print_info)
1263 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1264 "max %s, %Lu sectors: %s\n",
1265 ap->id, dev->devno,
1266 ata_id_major_version(id),
1267 ata_mode_string(xfer_mask),
1268 (unsigned long long)dev->n_sectors,
1269 lba_desc);
1270 } else {
1271 /* CHS */
1272
1273 /* Default translation */
1274 dev->cylinders = id[1];
1275 dev->heads = id[3];
1276 dev->sectors = id[6];
1277
1278 if (ata_id_current_chs_valid(id)) {
1279 /* Current CHS translation is valid. */
1280 dev->cylinders = id[54];
1281 dev->heads = id[55];
1282 dev->sectors = id[56];
1283 }
1284
1285 /* print device info to dmesg */
1286 if (print_info)
1287 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1288 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1289 ap->id, dev->devno,
1290 ata_id_major_version(id),
1291 ata_mode_string(xfer_mask),
1292 (unsigned long long)dev->n_sectors,
1293 dev->cylinders, dev->heads, dev->sectors);
1294 }
1295
1296 if (dev->id[59] & 0x100) {
1297 dev->multi_count = dev->id[59] & 0xff;
1298 DPRINTK("ata%u: dev %u multi count %u\n",
1299 			ap->id, dev->devno, dev->multi_count);
1300 }
1301
1302 dev->cdb_len = 16;
1303 }
1304
1305 /* ATAPI-specific feature tests */
1306 else if (dev->class == ATA_DEV_ATAPI) {
1307 rc = atapi_cdb_len(id);
1308 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1309 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1310 rc = -EINVAL;
1311 goto err_out_nosup;
1312 }
1313 dev->cdb_len = (unsigned int) rc;
1314
1315 if (ata_id_cdb_intr(dev->id))
1316 dev->flags |= ATA_DFLAG_CDB_INTR;
1317
1318 /* print device info to dmesg */
1319 if (print_info)
1320 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1321 ap->id, dev->devno, ata_mode_string(xfer_mask));
1322 }
1323
1324 ap->host->max_cmd_len = 0;
1325 for (i = 0; i < ATA_MAX_DEVICES; i++)
1326 ap->host->max_cmd_len = max_t(unsigned int,
1327 ap->host->max_cmd_len,
1328 ap->device[i].cdb_len);
1329
1330 /* limit bridge transfers to udma5, 200 sectors */
1331 if (ata_dev_knobble(ap, dev)) {
1332 if (print_info)
1333 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1334 ap->id, dev->devno);
1335 dev->udma_mask &= ATA_UDMA5;
1336 dev->max_sectors = ATA_MAX_SECTORS;
1337 }
1338
1339 if (ap->ops->dev_config)
1340 ap->ops->dev_config(ap, dev);
1341
1342 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1343 return 0;
1344
1345 err_out_nosup:
1346 DPRINTK("EXIT, err\n");
1347 return rc;
1348 }
1349
1350 /**
1351 * ata_bus_probe - Reset and probe ATA bus
1352 * @ap: Bus to probe
1353 *
1354 * Master ATA bus probing function. Initiates a hardware-dependent
1355 * bus reset, then attempts to identify any devices found on
1356 * the bus.
1357 *
1358 * LOCKING:
1359 * PCI/etc. bus probe sem.
1360 *
1361 * RETURNS:
1362 * Zero on success, non-zero on error.
1363 */
1364
1365 static int ata_bus_probe(struct ata_port *ap)
1366 {
1367 unsigned int classes[ATA_MAX_DEVICES];
1368 unsigned int i, rc, found = 0;
1369
1370 ata_port_probe(ap);
1371
1372 /* reset and determine device classes */
1373 for (i = 0; i < ATA_MAX_DEVICES; i++)
1374 classes[i] = ATA_DEV_UNKNOWN;
1375
1376 if (ap->ops->probe_reset) {
1377 rc = ap->ops->probe_reset(ap, classes);
1378 if (rc) {
1379 			printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1380 return rc;
1381 }
1382 } else {
1383 ap->ops->phy_reset(ap);
1384
1385 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1386 for (i = 0; i < ATA_MAX_DEVICES; i++)
1387 classes[i] = ap->device[i].class;
1388
1389 ata_port_probe(ap);
1390 }
1391
1392 for (i = 0; i < ATA_MAX_DEVICES; i++)
1393 if (classes[i] == ATA_DEV_UNKNOWN)
1394 classes[i] = ATA_DEV_NONE;
1395
1396 /* read IDENTIFY page and configure devices */
1397 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1398 struct ata_device *dev = &ap->device[i];
1399
1400 dev->class = classes[i];
1401
1402 if (!ata_dev_present(dev))
1403 continue;
1404
1405 WARN_ON(dev->id != NULL);
1406 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1407 dev->class = ATA_DEV_NONE;
1408 continue;
1409 }
1410
1411 if (ata_dev_configure(ap, dev, 1)) {
1412 ata_dev_disable(ap, dev);
1413 continue;
1414 }
1415
1416 found = 1;
1417 }
1418
1419 if (!found)
1420 goto err_out_disable;
1421
1422 ata_set_mode(ap);
1423 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1424 goto err_out_disable;
1425
1426 return 0;
1427
1428 err_out_disable:
1429 ap->ops->port_disable(ap);
1430 return -1;
1431 }
1432
1433 /**
1434 * ata_port_probe - Mark port as enabled
1435 * @ap: Port for which we indicate enablement
1436 *
1437 * Modify @ap data structure such that the system
1438 * thinks that the entire port is enabled.
1439 *
1440 * LOCKING: host_set lock, or some other form of
1441 * serialization.
1442 */
1443
1444 void ata_port_probe(struct ata_port *ap)
1445 {
1446 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1447 }
1448
1449 /**
1450 * sata_print_link_status - Print SATA link status
1451 * @ap: SATA port to printk link status about
1452 *
1453 * This function prints link speed and status of a SATA link.
1454 *
1455 * LOCKING:
1456 * None.
1457 */
1458 static void sata_print_link_status(struct ata_port *ap)
1459 {
1460 u32 sstatus, tmp;
1461 const char *speed;
1462
1463 if (!ap->ops->scr_read)
1464 return;
1465
1466 sstatus = scr_read(ap, SCR_STATUS);
1467
1468 if (sata_dev_present(ap)) {
1469 tmp = (sstatus >> 4) & 0xf;
1470 if (tmp & (1 << 0))
1471 speed = "1.5";
1472 else if (tmp & (1 << 1))
1473 speed = "3.0";
1474 else
1475 speed = "<unknown>";
1476 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1477 ap->id, speed, sstatus);
1478 } else {
1479 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1480 ap->id, sstatus);
1481 }
1482 }
1483
1484 /**
1485 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1486 * @ap: SATA port associated with target SATA PHY.
1487 *
1488 * This function issues commands to standard SATA Sxxx
1489 * PHY registers, to wake up the phy (and device), and
1490 * clear any reset condition.
1491 *
1492 * LOCKING:
1493 * PCI/etc. bus probe sem.
1494 *
1495 */
1496 void __sata_phy_reset(struct ata_port *ap)
1497 {
1498 u32 sstatus;
1499 unsigned long timeout = jiffies + (HZ * 5);
1500
1501 if (ap->flags & ATA_FLAG_SATA_RESET) {
1502 /* issue phy wake/reset */
1503 scr_write_flush(ap, SCR_CONTROL, 0x301);
1504 /* Couldn't find anything in SATA I/II specs, but
1505 * AHCI-1.1 10.4.2 says at least 1 ms. */
1506 mdelay(1);
1507 }
1508 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1509
1510 /* wait for phy to become ready, if necessary */
1511 do {
1512 msleep(200);
1513 sstatus = scr_read(ap, SCR_STATUS);
1514 if ((sstatus & 0xf) != 1)
1515 break;
1516 } while (time_before(jiffies, timeout));
1517
1518 /* print link status */
1519 sata_print_link_status(ap);
1520
1521 /* TODO: phy layer with polling, timeouts, etc. */
1522 if (sata_dev_present(ap))
1523 ata_port_probe(ap);
1524 else
1525 ata_port_disable(ap);
1526
1527 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1528 return;
1529
1530 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1531 ata_port_disable(ap);
1532 return;
1533 }
1534
1535 ap->cbl = ATA_CBL_SATA;
1536 }
1537
1538 /**
1539 * sata_phy_reset - Reset SATA bus.
1540 * @ap: SATA port associated with target SATA PHY.
1541 *
1542 * This function resets the SATA bus, and then probes
1543 * the bus for devices.
1544 *
1545 * LOCKING:
1546 * PCI/etc. bus probe sem.
1547 *
1548 */
1549 void sata_phy_reset(struct ata_port *ap)
1550 {
1551 __sata_phy_reset(ap);
1552 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1553 return;
1554 ata_bus_reset(ap);
1555 }
1556
1557 /**
1558 * ata_dev_pair - return other device on cable
1559 * @ap: port
1560 * @adev: device
1561 *
1562 * Obtain the other device on the same cable, or if none is
1563 * present NULL is returned
1564 */
1565
1566 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1567 {
1568 struct ata_device *pair = &ap->device[1 - adev->devno];
1569 if (!ata_dev_present(pair))
1570 return NULL;
1571 return pair;
1572 }
1573
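/* Example (illustrative): chipset timing code can use this to honour
 * shared-cable constraints between master and slave, e.g.
 *
 *	struct ata_device *pair = ata_dev_pair(ap, adev);
 *
 *	if (pair && pair->pio_mode < adev->pio_mode)
 *		consider clamping adev's timings to the slower neighbour
 */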
1574 /**
1575 * ata_port_disable - Disable port.
1576 * @ap: Port to be disabled.
1577 *
1578 * Modify @ap data structure such that the system
1579 * thinks that the entire port is disabled, and should
1580 * never attempt to probe or communicate with devices
1581 * on this port.
1582 *
1583 * LOCKING: host_set lock, or some other form of
1584 * serialization.
1585 */
1586
1587 void ata_port_disable(struct ata_port *ap)
1588 {
1589 ap->device[0].class = ATA_DEV_NONE;
1590 ap->device[1].class = ATA_DEV_NONE;
1591 ap->flags |= ATA_FLAG_PORT_DISABLED;
1592 }
1593
1594 /*
1595 * This mode timing computation functionality is ported over from
1596 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1597 */
1598 /*
1599 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1600 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1601 * for PIO 5, which is a nonstandard extension and UDMA6, which
1602 * is currently supported only by Maxtor drives.
1603 */
1604
1605 static const struct ata_timing ata_timing[] = {
1606
1607 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1608 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1609 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1610 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1611
1612 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1613 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1614 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1615
1616 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1617
1618 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1619 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1620 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1621
1622 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1623 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1624 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1625
1626 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1627 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1628 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1629
1630 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1631 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1632 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1633
1634 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1635
1636 { 0xFF }
1637 };
1638
1639 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1640 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1641
1642 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1643 {
1644 q->setup = EZ(t->setup * 1000, T);
1645 q->act8b = EZ(t->act8b * 1000, T);
1646 q->rec8b = EZ(t->rec8b * 1000, T);
1647 q->cyc8b = EZ(t->cyc8b * 1000, T);
1648 q->active = EZ(t->active * 1000, T);
1649 q->recover = EZ(t->recover * 1000, T);
1650 q->cycle = EZ(t->cycle * 1000, T);
1651 q->udma = EZ(t->udma * 1000, UT);
1652 }
1653
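/* Worked example, assuming (as the ide-timing derived callers do) that T
 * and UT are bus clock periods in picoseconds: quantizing the PIO4 cycle
 * of 120 ns on a 33 MHz bus (T = 30000 ps) gives
 * EZ(120 * 1000, 30000) = ENOUGH(120000, 30000) = 4 clock periods.
 */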
1654 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1655 struct ata_timing *m, unsigned int what)
1656 {
1657 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1658 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1659 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1660 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1661 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1662 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1663 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1664 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1665 }
1666
1667 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1668 {
1669 const struct ata_timing *t;
1670
1671 for (t = ata_timing; t->mode != speed; t++)
1672 if (t->mode == 0xFF)
1673 return NULL;
1674 return t;
1675 }
1676
1677 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1678 struct ata_timing *t, int T, int UT)
1679 {
1680 const struct ata_timing *s;
1681 struct ata_timing p;
1682
1683 /*
1684 * Find the mode.
1685 */
1686
1687 if (!(s = ata_timing_find_mode(speed)))
1688 return -EINVAL;
1689
1690 memcpy(t, s, sizeof(*s));
1691
1692 /*
1693 * If the drive is an EIDE drive, it can tell us it needs extended
1694 * PIO/MW_DMA cycle timing.
1695 */
1696
1697 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1698 memset(&p, 0, sizeof(p));
1699 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1700 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1701 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1702 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1703 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1704 }
1705 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1706 }
1707
1708 /*
1709 * Convert the timing to bus clock counts.
1710 */
1711
1712 ata_timing_quantize(t, t, T, UT);
1713
1714 /*
1715 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1716  * S.M.A.R.T. and some other commands. We have to ensure that the
1717  * DMA cycle timing is slower than or equal to the fastest PIO timing.
1718 */
1719
1720 if (speed > XFER_PIO_4) {
1721 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1722 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1723 }
1724
1725 /*
1726 * Lengthen active & recovery time so that cycle time is correct.
1727 */
1728
1729 if (t->act8b + t->rec8b < t->cyc8b) {
1730 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1731 t->rec8b = t->cyc8b - t->act8b;
1732 }
1733
1734 if (t->active + t->recover < t->cycle) {
1735 t->active += (t->cycle - (t->active + t->recover)) / 2;
1736 t->recover = t->cycle - t->active;
1737 }
1738
1739 return 0;
1740 }
1741
1742 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1743 {
1744 unsigned int err_mask;
1745 int rc;
1746
1747 if (dev->xfer_shift == ATA_SHIFT_PIO)
1748 dev->flags |= ATA_DFLAG_PIO;
1749
1750 err_mask = ata_dev_set_xfermode(ap, dev);
1751 if (err_mask) {
1752 printk(KERN_ERR
1753 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1754 ap->id, err_mask);
1755 return -EIO;
1756 }
1757
1758 rc = ata_dev_revalidate(ap, dev, 0);
1759 if (rc) {
1760 printk(KERN_ERR
1761 "ata%u: failed to revalidate after set xfermode\n",
1762 ap->id);
1763 return rc;
1764 }
1765
1766 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1767 dev->xfer_shift, (int)dev->xfer_mode);
1768
1769 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1770 ap->id, dev->devno,
1771 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1772 return 0;
1773 }
1774
1775 static int ata_host_set_pio(struct ata_port *ap)
1776 {
1777 int i;
1778
1779 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1780 struct ata_device *dev = &ap->device[i];
1781
1782 if (!ata_dev_present(dev))
1783 continue;
1784
1785 if (!dev->pio_mode) {
1786 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1787 return -1;
1788 }
1789
1790 dev->xfer_mode = dev->pio_mode;
1791 dev->xfer_shift = ATA_SHIFT_PIO;
1792 if (ap->ops->set_piomode)
1793 ap->ops->set_piomode(ap, dev);
1794 }
1795
1796 return 0;
1797 }
1798
1799 static void ata_host_set_dma(struct ata_port *ap)
1800 {
1801 int i;
1802
1803 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1804 struct ata_device *dev = &ap->device[i];
1805
1806 if (!ata_dev_present(dev) || !dev->dma_mode)
1807 continue;
1808
1809 dev->xfer_mode = dev->dma_mode;
1810 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1811 if (ap->ops->set_dmamode)
1812 ap->ops->set_dmamode(ap, dev);
1813 }
1814 }
1815
1816 /**
1817 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1818 * @ap: port on which timings will be programmed
1819 *
1820 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1821 *
1822 * LOCKING:
1823 * PCI/etc. bus probe sem.
1824 */
1825 static void ata_set_mode(struct ata_port *ap)
1826 {
1827 int i, rc;
1828
1829 /* step 1: calculate xfer_mask */
1830 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1831 struct ata_device *dev = &ap->device[i];
1832 unsigned int pio_mask, dma_mask;
1833
1834 if (!ata_dev_present(dev))
1835 continue;
1836
1837 ata_dev_xfermask(ap, dev);
1838
1839 /* TODO: let LLDD filter dev->*_mask here */
1840
1841 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1842 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1843 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1844 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1845 }
1846
1847 /* step 2: always set host PIO timings */
1848 rc = ata_host_set_pio(ap);
1849 if (rc)
1850 goto err_out;
1851
1852 /* step 3: set host DMA timings */
1853 ata_host_set_dma(ap);
1854
1855 /* step 4: update devices' xfer mode */
1856 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1857 struct ata_device *dev = &ap->device[i];
1858
1859 if (!ata_dev_present(dev))
1860 continue;
1861
1862 if (ata_dev_set_mode(ap, dev))
1863 goto err_out;
1864 }
1865
1866 if (ap->ops->post_set_mode)
1867 ap->ops->post_set_mode(ap);
1868
1869 return;
1870
1871 err_out:
1872 ata_port_disable(ap);
1873 }
1874
1875 /**
1876 * ata_tf_to_host - issue ATA taskfile to host controller
1877 * @ap: port to which command is being issued
1878 * @tf: ATA taskfile register set
1879 *
1880 * Issues ATA taskfile register set to ATA host controller,
1881 * with proper synchronization with interrupt handler and
1882 * other threads.
1883 *
1884 * LOCKING:
1885 * spin_lock_irqsave(host_set lock)
1886 */
1887
1888 static inline void ata_tf_to_host(struct ata_port *ap,
1889 const struct ata_taskfile *tf)
1890 {
1891 ap->ops->tf_load(ap, tf);
1892 ap->ops->exec_command(ap, tf);
1893 }
1894
1895 /**
1896 * ata_busy_sleep - sleep until BSY clears, or timeout
1897 * @ap: port containing status register to be polled
1898 * @tmout_pat: impatience timeout
1899 * @tmout: overall timeout
1900 *
1901 * Sleep until ATA Status register bit BSY clears,
1902 * or a timeout occurs.
1903 *
1904 * LOCKING: None.
1905 */
1906
1907 unsigned int ata_busy_sleep (struct ata_port *ap,
1908 unsigned long tmout_pat, unsigned long tmout)
1909 {
1910 unsigned long timer_start, timeout;
1911 u8 status;
1912
1913 status = ata_busy_wait(ap, ATA_BUSY, 300);
1914 timer_start = jiffies;
1915 timeout = timer_start + tmout_pat;
1916 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1917 msleep(50);
1918 status = ata_busy_wait(ap, ATA_BUSY, 3);
1919 }
1920
1921 if (status & ATA_BUSY)
1922 printk(KERN_WARNING "ata%u is slow to respond, "
1923 "please be patient\n", ap->id);
1924
1925 timeout = timer_start + tmout;
1926 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1927 msleep(50);
1928 status = ata_chk_status(ap);
1929 }
1930
1931 if (status & ATA_BUSY) {
1932 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1933 ap->id, tmout / HZ);
1934 return 1;
1935 }
1936
1937 return 0;
1938 }
1939
1940 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1941 {
1942 struct ata_ioports *ioaddr = &ap->ioaddr;
1943 unsigned int dev0 = devmask & (1 << 0);
1944 unsigned int dev1 = devmask & (1 << 1);
1945 unsigned long timeout;
1946
1947 /* if device 0 was found in ata_devchk, wait for its
1948 * BSY bit to clear
1949 */
1950 if (dev0)
1951 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1952
1953 /* if device 1 was found in ata_devchk, wait for
1954 * register access, then wait for BSY to clear
1955 */
1956 timeout = jiffies + ATA_TMOUT_BOOT;
1957 while (dev1) {
1958 u8 nsect, lbal;
1959
1960 ap->ops->dev_select(ap, 1);
1961 if (ap->flags & ATA_FLAG_MMIO) {
1962 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1963 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1964 } else {
1965 nsect = inb(ioaddr->nsect_addr);
1966 lbal = inb(ioaddr->lbal_addr);
1967 }
1968 if ((nsect == 1) && (lbal == 1))
1969 break;
1970 if (time_after(jiffies, timeout)) {
1971 dev1 = 0;
1972 break;
1973 }
1974 msleep(50); /* give drive a breather */
1975 }
1976 if (dev1)
1977 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1978
1979 /* is all this really necessary? */
1980 ap->ops->dev_select(ap, 0);
1981 if (dev1)
1982 ap->ops->dev_select(ap, 1);
1983 if (dev0)
1984 ap->ops->dev_select(ap, 0);
1985 }
1986
1987 static unsigned int ata_bus_softreset(struct ata_port *ap,
1988 unsigned int devmask)
1989 {
1990 struct ata_ioports *ioaddr = &ap->ioaddr;
1991
1992 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1993
1994 /* software reset. causes dev0 to be selected */
1995 if (ap->flags & ATA_FLAG_MMIO) {
1996 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1997 udelay(20); /* FIXME: flush */
1998 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1999 udelay(20); /* FIXME: flush */
2000 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2001 } else {
2002 outb(ap->ctl, ioaddr->ctl_addr);
2003 udelay(10);
2004 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2005 udelay(10);
2006 outb(ap->ctl, ioaddr->ctl_addr);
2007 }
2008
2009 /* spec mandates ">= 2ms" before checking status.
2010 * We wait 150ms, because that was the magic delay used for
2011 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2012 * between when the ATA command register is written, and then
2013 * status is checked. Because waiting for "a while" before
2014 * checking status is fine, post SRST, we perform this magic
2015 * delay here as well.
2016 *
2017 	 * Old drivers/ide uses the 2 ms rule and then waits for ready
2018 */
2019 msleep(150);
2020
2021
2022 /* Before we perform post reset processing we want to see if
2023 the bus shows 0xFF because the odd clown forgets the D7 pulldown
2024 resistor */
2025
2026 if (ata_check_status(ap) == 0xFF)
2027 return 1; /* Positive is failure for some reason */
2028
2029 ata_bus_post_reset(ap, devmask);
2030
2031 return 0;
2032 }
2033
2034 /**
2035 * ata_bus_reset - reset host port and associated ATA channel
2036 * @ap: port to reset
2037 *
2038 * This is typically the first time we actually start issuing
2039 * commands to the ATA channel. We wait for BSY to clear, then
2040 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2041 * result. Determine what devices, if any, are on the channel
2042 * by looking at the device 0/1 error register. Look at the signature
2043 * stored in each device's taskfile registers, to determine if
2044 * the device is ATA or ATAPI.
2045 *
2046 * LOCKING:
2047 * PCI/etc. bus probe sem.
2048 * Obtains host_set lock.
2049 *
2050 * SIDE EFFECTS:
2051 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2052 */
2053
2054 void ata_bus_reset(struct ata_port *ap)
2055 {
2056 struct ata_ioports *ioaddr = &ap->ioaddr;
2057 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2058 u8 err;
2059 unsigned int dev0, dev1 = 0, devmask = 0;
2060
2061 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2062
2063 /* determine if device 0/1 are present */
2064 if (ap->flags & ATA_FLAG_SATA_RESET)
2065 dev0 = 1;
2066 else {
2067 dev0 = ata_devchk(ap, 0);
2068 if (slave_possible)
2069 dev1 = ata_devchk(ap, 1);
2070 }
2071
2072 if (dev0)
2073 devmask |= (1 << 0);
2074 if (dev1)
2075 devmask |= (1 << 1);
2076
2077 /* select device 0 again */
2078 ap->ops->dev_select(ap, 0);
2079
2080 /* issue bus reset */
2081 if (ap->flags & ATA_FLAG_SRST)
2082 if (ata_bus_softreset(ap, devmask))
2083 goto err_out;
2084
2085 /*
2086 * determine by signature whether we have ATA or ATAPI devices
2087 */
2088 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2089 if ((slave_possible) && (err != 0x81))
2090 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2091
2092 /* re-enable interrupts */
2093 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2094 ata_irq_on(ap);
2095
2096 /* is double-select really necessary? */
2097 if (ap->device[1].class != ATA_DEV_NONE)
2098 ap->ops->dev_select(ap, 1);
2099 if (ap->device[0].class != ATA_DEV_NONE)
2100 ap->ops->dev_select(ap, 0);
2101
2102 /* if no devices were detected, disable this port */
2103 if ((ap->device[0].class == ATA_DEV_NONE) &&
2104 (ap->device[1].class == ATA_DEV_NONE))
2105 goto err_out;
2106
2107 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2108 /* set up device control for ATA_FLAG_SATA_RESET */
2109 if (ap->flags & ATA_FLAG_MMIO)
2110 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2111 else
2112 outb(ap->ctl, ioaddr->ctl_addr);
2113 }
2114
2115 DPRINTK("EXIT\n");
2116 return;
2117
2118 err_out:
2119 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2120 ap->ops->port_disable(ap);
2121
2122 DPRINTK("EXIT\n");
2123 }
2124
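/* sata_phy_resume - bring the SATA phy back up
 *
 * Clear any reset action in SControl (DET = 0; the 0x300 value also
 * disables transitions to the partial/slumber power management states),
 * then poll the DET field of SStatus until the phy leaves the
 * "device detected, no communication" state, for up to about 5 seconds.
 *
 * Returns 0 on success, -1 on timeout.
 */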
2125 static int sata_phy_resume(struct ata_port *ap)
2126 {
2127 unsigned long timeout = jiffies + (HZ * 5);
2128 u32 sstatus;
2129
2130 scr_write_flush(ap, SCR_CONTROL, 0x300);
2131
2132 /* Wait for phy to become ready, if necessary. */
2133 do {
2134 msleep(200);
2135 sstatus = scr_read(ap, SCR_STATUS);
2136 if ((sstatus & 0xf) != 1)
2137 return 0;
2138 } while (time_before(jiffies, timeout));
2139
2140 return -1;
2141 }
2142
2143 /**
2144 * ata_std_probeinit - initialize probing
2145 * @ap: port to be probed
2146 *
2147 * @ap is about to be probed. Initialize it. This function is
2148 * to be used as standard callback for ata_drive_probe_reset().
2149 *
2150 * NOTE!!! Do not use this function as probeinit if a low level
2151 * driver implements only hardreset. Just pass NULL as probeinit
2152 * in that case. Using this function is probably okay, but doing
2153 * so makes the reset sequence different from the original
2154 * ->phy_reset implementation and makes Jeff nervous. :-P
2155 */
2156 void ata_std_probeinit(struct ata_port *ap)
2157 {
2158 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2159 sata_phy_resume(ap);
2160 if (sata_dev_present(ap))
2161 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2162 }
2163 }
2164
2165 /**
2166 * ata_std_softreset - reset host port via ATA SRST
2167 * @ap: port to reset
2168 * @verbose: fail verbosely
2169 * @classes: resulting classes of attached devices
2170 *
2171 * Reset host port using ATA SRST. This function is to be used
2172 * as standard callback for ata_drive_*_reset() functions.
2173 *
2174 * LOCKING:
2175 * Kernel thread context (may sleep)
2176 *
2177 * RETURNS:
2178 * 0 on success, -errno otherwise.
2179 */
2180 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2181 {
2182 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2183 unsigned int devmask = 0, err_mask;
2184 u8 err;
2185
2186 DPRINTK("ENTER\n");
2187
2188 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2189 classes[0] = ATA_DEV_NONE;
2190 goto out;
2191 }
2192
2193 /* determine if device 0/1 are present */
2194 if (ata_devchk(ap, 0))
2195 devmask |= (1 << 0);
2196 if (slave_possible && ata_devchk(ap, 1))
2197 devmask |= (1 << 1);
2198
2199 /* select device 0 again */
2200 ap->ops->dev_select(ap, 0);
2201
2202 /* issue bus reset */
2203 DPRINTK("about to softreset, devmask=%x\n", devmask);
2204 err_mask = ata_bus_softreset(ap, devmask);
2205 if (err_mask) {
2206 if (verbose)
2207 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2208 ap->id, err_mask);
2209 else
2210 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2211 err_mask);
2212 return -EIO;
2213 }
2214
2215 /* determine by signature whether we have ATA or ATAPI devices */
2216 classes[0] = ata_dev_try_classify(ap, 0, &err);
2217 if (slave_possible && err != 0x81)
2218 classes[1] = ata_dev_try_classify(ap, 1, &err);
2219
2220 out:
2221 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2222 return 0;
2223 }
2224
2225 /**
2226 * sata_std_hardreset - reset host port via SATA phy reset
2227 * @ap: port to reset
2228 * @verbose: fail verbosely
2229 * @class: resulting class of attached device
2230 *
2231 * SATA phy-reset host port using DET bits of SControl register.
2232 * This function is to be used as standard callback for
2233 * ata_drive_*_reset().
2234 *
2235 * LOCKING:
2236 * Kernel thread context (may sleep)
2237 *
2238 * RETURNS:
2239 * 0 on success, -errno otherwise.
2240 */
2241 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2242 {
2243 DPRINTK("ENTER\n");
2244
2245 /* Issue phy wake/reset */
2246 scr_write_flush(ap, SCR_CONTROL, 0x301);
2247
2248 /*
2249 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2250 * 10.4.2 says at least 1 ms.
2251 */
2252 msleep(1);
2253
2254 /* Bring phy back */
2255 sata_phy_resume(ap);
2256
2257 /* TODO: phy layer with polling, timeouts, etc. */
2258 if (!sata_dev_present(ap)) {
2259 *class = ATA_DEV_NONE;
2260 DPRINTK("EXIT, link offline\n");
2261 return 0;
2262 }
2263
2264 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2265 if (verbose)
2266 printk(KERN_ERR "ata%u: COMRESET failed "
2267 "(device not ready)\n", ap->id);
2268 else
2269 DPRINTK("EXIT, device not ready\n");
2270 return -EIO;
2271 }
2272
2273 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2274
2275 *class = ata_dev_try_classify(ap, 0, NULL);
2276
2277 DPRINTK("EXIT, class=%u\n", *class);
2278 return 0;
2279 }
2280
2281 /**
2282 * ata_std_postreset - standard postreset callback
2283 * @ap: the target ata_port
2284 * @classes: classes of attached devices
2285 *
2286 * This function is invoked after a successful reset. Note that
2287 * the device might have been reset more than once using
2288 * different reset methods before postreset is invoked.
2289 *
2290 * This function is to be used as standard callback for
2291 * ata_drive_*_reset().
2292 *
2293 * LOCKING:
2294 * Kernel thread context (may sleep)
2295 */
2296 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2297 {
2298 DPRINTK("ENTER\n");
2299
2300 /* set cable type if it isn't already set */
2301 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2302 ap->cbl = ATA_CBL_SATA;
2303
2304 /* print link status */
2305 if (ap->cbl == ATA_CBL_SATA)
2306 sata_print_link_status(ap);
2307
2308 /* re-enable interrupts */
2309 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2310 ata_irq_on(ap);
2311
2312 /* is double-select really necessary? */
2313 if (classes[0] != ATA_DEV_NONE)
2314 ap->ops->dev_select(ap, 1);
2315 if (classes[1] != ATA_DEV_NONE)
2316 ap->ops->dev_select(ap, 0);
2317
2318 /* bail out if no device is present */
2319 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2320 DPRINTK("EXIT, no device\n");
2321 return;
2322 }
2323
2324 /* set up device control */
2325 if (ap->ioaddr.ctl_addr) {
2326 if (ap->flags & ATA_FLAG_MMIO)
2327 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2328 else
2329 outb(ap->ctl, ap->ioaddr.ctl_addr);
2330 }
2331
2332 DPRINTK("EXIT\n");
2333 }
2334
2335 /**
2336 * ata_std_probe_reset - standard probe reset method
2337 * @ap: port to perform probe-reset on
2338 * @classes: resulting classes of attached devices
2339 *
2340 * The stock off-the-shelf ->probe_reset method.
2341 *
2342 * LOCKING:
2343 * Kernel thread context (may sleep)
2344 *
2345 * RETURNS:
2346 * 0 on success, -errno otherwise.
2347 */
2348 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2349 {
2350 ata_reset_fn_t hardreset;
2351
2352 hardreset = NULL;
2353 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2354 hardreset = sata_std_hardreset;
2355
2356 return ata_drive_probe_reset(ap, ata_std_probeinit,
2357 ata_std_softreset, hardreset,
2358 ata_std_postreset, classes);
2359 }
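
/* A LLD that supplies its own hardreset hook would typically build its
 * ->probe_reset the same way, e.g. (foo_hardreset and foo_probe_reset are
 * hypothetical names, shown only for illustration):
 *
 *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset, foo_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */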
2360
2361 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2362 ata_postreset_fn_t postreset,
2363 unsigned int *classes)
2364 {
2365 int i, rc;
2366
2367 for (i = 0; i < ATA_MAX_DEVICES; i++)
2368 classes[i] = ATA_DEV_UNKNOWN;
2369
2370 rc = reset(ap, 0, classes);
2371 if (rc)
2372 return rc;
2373
2374 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2375 * is complete and convert all ATA_DEV_UNKNOWN to
2376 * ATA_DEV_NONE.
2377 */
2378 for (i = 0; i < ATA_MAX_DEVICES; i++)
2379 if (classes[i] != ATA_DEV_UNKNOWN)
2380 break;
2381
2382 if (i < ATA_MAX_DEVICES)
2383 for (i = 0; i < ATA_MAX_DEVICES; i++)
2384 if (classes[i] == ATA_DEV_UNKNOWN)
2385 classes[i] = ATA_DEV_NONE;
2386
2387 if (postreset)
2388 postreset(ap, classes);
2389
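	/* device 0 still unknown means the reset ran but classification failed */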
2390 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2391 }
2392
2393 /**
2394 * ata_drive_probe_reset - Perform probe reset with given methods
2395 * @ap: port to reset
2396 * @probeinit: probeinit method (can be NULL)
2397 * @softreset: softreset method (can be NULL)
2398 * @hardreset: hardreset method (can be NULL)
2399 * @postreset: postreset method (can be NULL)
2400 * @classes: resulting classes of attached devices
2401 *
2402 * Reset the specified port and classify attached devices using
2403 * given methods. This function prefers softreset but tries all
2404 * possible reset sequences to reset and classify devices. This
2405 * function is intended to be used for constructing ->probe_reset
2406 * callback by low level drivers.
2407 *
2408 * Reset methods should follow the following rules.
2409 *
2410 * - Return 0 on success, -errno on failure.
2411 * - If classification is supported, fill classes[] with
2412 * recognized class codes.
2413 * - If classification is not supported, leave classes[] alone.
2414 * - If verbose is non-zero, print error message on failure;
2415 * otherwise, shut up.
2416 *
2417 * LOCKING:
2418 * Kernel thread context (may sleep)
2419 *
2420 * RETURNS:
2421 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2422 * if classification fails, and any error code from reset
2423 * methods.
2424 */
2425 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2426 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2427 ata_postreset_fn_t postreset, unsigned int *classes)
2428 {
2429 int rc = -EINVAL;
2430
2431 if (probeinit)
2432 probeinit(ap);
2433
2434 if (softreset) {
2435 rc = do_probe_reset(ap, softreset, postreset, classes);
2436 if (rc == 0)
2437 return 0;
2438 }
2439
2440 if (!hardreset)
2441 return rc;
2442
2443 rc = do_probe_reset(ap, hardreset, postreset, classes);
2444 if (rc == 0 || rc != -ENODEV)
2445 return rc;
2446
2447 if (softreset)
2448 rc = do_probe_reset(ap, softreset, postreset, classes);
2449
2450 return rc;
2451 }
2452
2453 /**
2454 * ata_dev_same_device - Determine whether new ID matches configured device
2455 * @ap: port on which the device to compare against resides
2456 * @dev: device to compare against
2457 * @new_class: class of the new device
2458 * @new_id: IDENTIFY page of the new device
2459 *
2460 * Compare @new_class and @new_id against @dev and determine
2461 * whether @dev is the device indicated by @new_class and
2462 * @new_id.
2463 *
2464 * LOCKING:
2465 * None.
2466 *
2467 * RETURNS:
2468 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2469 */
2470 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2471 unsigned int new_class, const u16 *new_id)
2472 {
2473 const u16 *old_id = dev->id;
2474 unsigned char model[2][41], serial[2][21];
2475 u64 new_n_sectors;
2476
2477 if (dev->class != new_class) {
2478 printk(KERN_INFO
2479 "ata%u: dev %u class mismatch %d != %d\n",
2480 ap->id, dev->devno, dev->class, new_class);
2481 return 0;
2482 }
2483
2484 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2485 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2486 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2487 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2488 new_n_sectors = ata_id_n_sectors(new_id);
2489
2490 if (strcmp(model[0], model[1])) {
2491 printk(KERN_INFO
2492 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2493 ap->id, dev->devno, model[0], model[1]);
2494 return 0;
2495 }
2496
2497 if (strcmp(serial[0], serial[1])) {
2498 printk(KERN_INFO
2499 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2500 ap->id, dev->devno, serial[0], serial[1]);
2501 return 0;
2502 }
2503
2504 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2505 printk(KERN_INFO
2506 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2507 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2508 (unsigned long long)new_n_sectors);
2509 return 0;
2510 }
2511
2512 return 1;
2513 }
2514
2515 /**
2516 * ata_dev_revalidate - Revalidate ATA device
2517 * @ap: port on which the device to revalidate resides
2518 * @dev: device to revalidate
2519 * @post_reset: is this revalidation after reset?
2520 *
2521 * Re-read IDENTIFY page and make sure @dev is still attached to
2522 * the port.
2523 *
2524 * LOCKING:
2525 * Kernel thread context (may sleep)
2526 *
2527 * RETURNS:
2528 * 0 on success, negative errno otherwise
2529 */
2530 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2531 int post_reset)
2532 {
2533 unsigned int class;
2534 u16 *id;
2535 int rc;
2536
2537 if (!ata_dev_present(dev))
2538 return -ENODEV;
2539
2540 class = dev->class;
2541 id = NULL;
2542
2543 /* allocate & read ID data */
2544 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2545 if (rc)
2546 goto fail;
2547
2548 /* is the device still there? */
2549 if (!ata_dev_same_device(ap, dev, class, id)) {
2550 rc = -ENODEV;
2551 goto fail;
2552 }
2553
2554 kfree(dev->id);
2555 dev->id = id;
2556
2557 /* configure device according to the new ID */
2558 return ata_dev_configure(ap, dev, 0);
2559
2560 fail:
2561 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2562 ap->id, dev->devno, rc);
2563 kfree(id);
2564 return rc;
2565 }
2566
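/* Known-broken DMA devices, stored as { model string, firmware revision }
 * pairs. A NULL revision entry blacklists every revision of that model.
 */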
2567 static const char * const ata_dma_blacklist [] = {
2568 "WDC AC11000H", NULL,
2569 "WDC AC22100H", NULL,
2570 "WDC AC32500H", NULL,
2571 "WDC AC33100H", NULL,
2572 "WDC AC31600H", NULL,
2573 "WDC AC32100H", "24.09P07",
2574 "WDC AC23200L", "21.10N21",
2575 "Compaq CRD-8241B", NULL,
2576 "CRD-8400B", NULL,
2577 "CRD-8480B", NULL,
2578 "CRD-8482B", NULL,
2579 "CRD-84", NULL,
2580 "SanDisk SDP3B", NULL,
2581 "SanDisk SDP3B-64", NULL,
2582 "SANYO CD-ROM CRD", NULL,
2583 "HITACHI CDR-8", NULL,
2584 "HITACHI CDR-8335", NULL,
2585 "HITACHI CDR-8435", NULL,
2586 "Toshiba CD-ROM XM-6202B", NULL,
2587 "TOSHIBA CD-ROM XM-1702BC", NULL,
2588 "CD-532E-A", NULL,
2589 "E-IDE CD-ROM CR-840", NULL,
2590 "CD-ROM Drive/F5A", NULL,
2591 "WPI CDD-820", NULL,
2592 "SAMSUNG CD-ROM SC-148C", NULL,
2593 "SAMSUNG CD-ROM SC", NULL,
2594 "SanDisk SDP3B-64", NULL,
2595 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2596 "_NEC DV5800A", NULL,
2597 "SAMSUNG CD-ROM SN-124", "N001"
2598 };
2599
2600 static int ata_strim(char *s, size_t len)
2601 {
2602 len = strnlen(s, len);
2603
2604 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2605 while ((len > 0) && (s[len - 1] == ' ')) {
2606 len--;
2607 s[len] = 0;
2608 }
2609 return len;
2610 }
2611
2612 static int ata_dma_blacklisted(const struct ata_device *dev)
2613 {
2614 unsigned char model_num[40];
2615 unsigned char model_rev[16];
2616 unsigned int nlen, rlen;
2617 int i;
2618
2619 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2620 sizeof(model_num));
2621 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2622 sizeof(model_rev));
2623 nlen = ata_strim(model_num, sizeof(model_num));
2624 rlen = ata_strim(model_rev, sizeof(model_rev));
2625
2626 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2627 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2628 if (ata_dma_blacklist[i+1] == NULL)
2629 return 1;
2630 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2631 return 1;
2632 }
2633 }
2634 return 0;
2635 }
2636
2637 /**
2638 * ata_dev_xfermask - Compute supported xfermask of the given device
2639 * @ap: Port on which the device to compute xfermask for resides
2640 * @dev: Device to compute xfermask for
2641 *
2642 * Compute supported xfermask of @dev and store it in
2643 * dev->*_mask. This function is responsible for applying all
2644 * known limits including host controller limits, device
2645 * blacklist, etc...
2646 *
2647 * LOCKING:
2648 * None.
2649 */
2650 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2651 {
2652 unsigned long xfer_mask;
2653 int i;
2654
2655 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2656 ap->udma_mask);
2657
2658 /* use port-wide xfermask for now */
2659 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2660 struct ata_device *d = &ap->device[i];
2661 if (!ata_dev_present(d))
2662 continue;
2663 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2664 d->udma_mask);
2665 xfer_mask &= ata_id_xfermask(d->id);
2666 if (ata_dma_blacklisted(d))
2667 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2668 }
2669
2670 if (ata_dma_blacklisted(dev))
2671 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2672 "disabling DMA\n", ap->id, dev->devno);
2673
2674 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2675 &dev->udma_mask);
2676 }
2677
2678 /**
2679 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2680 * @ap: Port associated with device @dev
2681 * @dev: Device to which command will be sent
2682 *
2683 * Issue SET FEATURES - XFER MODE command to device @dev
2684 * on port @ap.
2685 *
2686 * LOCKING:
2687 * PCI/etc. bus probe sem.
2688 *
2689 * RETURNS:
2690 * 0 on success, AC_ERR_* mask otherwise.
2691 */
2692
2693 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2694 struct ata_device *dev)
2695 {
2696 struct ata_taskfile tf;
2697 unsigned int err_mask;
2698
2699 /* set up set-features taskfile */
2700 DPRINTK("set features - xfer mode\n");
2701
2702 ata_tf_init(ap, &tf, dev->devno);
2703 tf.command = ATA_CMD_SET_FEATURES;
2704 tf.feature = SETFEATURES_XFER;
2705 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2706 tf.protocol = ATA_PROT_NODATA;
2707 tf.nsect = dev->xfer_mode;
2708
2709 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2710
2711 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2712 return err_mask;
2713 }
2714
2715 /**
2716 * ata_dev_init_params - Issue INIT DEV PARAMS command
2717 * @ap: Port associated with device @dev
2718 * @dev: Device to which command will be sent
2719 *
2720 * LOCKING:
2721 * Kernel thread context (may sleep)
2722 *
2723 * RETURNS:
2724 * 0 on success, AC_ERR_* mask otherwise.
2725 */
2726
2727 static unsigned int ata_dev_init_params(struct ata_port *ap,
2728 struct ata_device *dev)
2729 {
2730 struct ata_taskfile tf;
2731 unsigned int err_mask;
2732 u16 sectors = dev->id[6];
2733 u16 heads = dev->id[3];
2734
2735 /* Number of sectors per track 1-255. Number of heads 1-16 */
2736 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2737 return 0;
2738
2739 /* set up init dev params taskfile */
2740 DPRINTK("init dev params \n");
2741
2742 ata_tf_init(ap, &tf, dev->devno);
2743 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2744 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2745 tf.protocol = ATA_PROT_NODATA;
2746 tf.nsect = sectors;
2747 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2748
2749 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2750
2751 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2752 return err_mask;
2753 }
2754
2755 /**
2756 * ata_sg_clean - Unmap DMA memory associated with command
2757 * @qc: Command containing DMA memory to be released
2758 *
2759 * Unmap all mapped DMA memory associated with this command.
2760 *
2761 * LOCKING:
2762 * spin_lock_irqsave(host_set lock)
2763 */
2764
2765 static void ata_sg_clean(struct ata_queued_cmd *qc)
2766 {
2767 struct ata_port *ap = qc->ap;
2768 struct scatterlist *sg = qc->__sg;
2769 int dir = qc->dma_dir;
2770 void *pad_buf = NULL;
2771
2772 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2773 WARN_ON(sg == NULL);
2774
2775 if (qc->flags & ATA_QCFLAG_SINGLE)
2776 WARN_ON(qc->n_elem > 1);
2777
2778 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2779
2780 /* if we padded the buffer out to 32-bit bound, and data
2781 * xfer direction is from-device, we must copy from the
2782 * pad buffer back into the supplied buffer
2783 */
2784 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2785 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2786
2787 if (qc->flags & ATA_QCFLAG_SG) {
2788 if (qc->n_elem)
2789 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2790 /* restore last sg */
2791 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2792 if (pad_buf) {
2793 struct scatterlist *psg = &qc->pad_sgent;
2794 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2795 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2796 kunmap_atomic(addr, KM_IRQ0);
2797 }
2798 } else {
2799 if (qc->n_elem)
2800 dma_unmap_single(ap->dev,
2801 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2802 dir);
2803 /* restore sg */
2804 sg->length += qc->pad_len;
2805 if (pad_buf)
2806 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2807 pad_buf, qc->pad_len);
2808 }
2809
2810 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2811 qc->__sg = NULL;
2812 }
2813
2814 /**
2815 * ata_fill_sg - Fill PCI IDE PRD table
2816 * @qc: Metadata associated with taskfile to be transferred
2817 *
2818 * Fill PCI IDE PRD (scatter-gather) table with segments
2819 * associated with the current disk command.
2820 *
2821 * LOCKING:
2822 * spin_lock_irqsave(host_set lock)
2823 *
2824 */
2825 static void ata_fill_sg(struct ata_queued_cmd *qc)
2826 {
2827 struct ata_port *ap = qc->ap;
2828 struct scatterlist *sg;
2829 unsigned int idx;
2830
2831 WARN_ON(qc->__sg == NULL);
2832 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2833
2834 idx = 0;
2835 ata_for_each_sg(sg, qc) {
2836 u32 addr, offset;
2837 u32 sg_len, len;
2838
2839 /* determine if physical DMA addr spans 64K boundary.
2840 * Note h/w doesn't support 64-bit, so we unconditionally
2841 * truncate dma_addr_t to u32.
2842 */
2843 addr = (u32) sg_dma_address(sg);
2844 sg_len = sg_dma_len(sg);
2845
2846 while (sg_len) {
2847 offset = addr & 0xffff;
2848 len = sg_len;
2849 if ((offset + sg_len) > 0x10000)
2850 len = 0x10000 - offset;
2851
2852 ap->prd[idx].addr = cpu_to_le32(addr);
2853 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2854 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2855
2856 idx++;
2857 sg_len -= len;
2858 addr += len;
2859 }
2860 }
2861
2862 if (idx)
2863 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2864 }
2865 /**
2866 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2867 * @qc: Metadata associated with taskfile to check
2868 *
2869 * Allow low-level driver to filter ATA PACKET commands, returning
2870 * a status indicating whether or not it is OK to use DMA for the
2871 * supplied PACKET command.
2872 *
2873 * LOCKING:
2874 * spin_lock_irqsave(host_set lock)
2875 *
2876 * RETURNS: 0 when ATAPI DMA can be used
2877 * nonzero otherwise
2878 */
2879 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2880 {
2881 struct ata_port *ap = qc->ap;
2882 int rc = 0; /* Assume ATAPI DMA is OK by default */
2883
2884 if (ap->ops->check_atapi_dma)
2885 rc = ap->ops->check_atapi_dma(qc);
2886
2887 return rc;
2888 }
2889 /**
2890 * ata_qc_prep - Prepare taskfile for submission
2891 * @qc: Metadata associated with taskfile to be prepared
2892 *
2893 * Prepare ATA taskfile for submission.
2894 *
2895 * LOCKING:
2896 * spin_lock_irqsave(host_set lock)
2897 */
2898 void ata_qc_prep(struct ata_queued_cmd *qc)
2899 {
2900 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2901 return;
2902
2903 ata_fill_sg(qc);
2904 }
2905
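/* ->qc_prep hook for controllers that need no PRD/prep work */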
2906 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2907
2908 /**
2909 * ata_sg_init_one - Associate command with memory buffer
2910 * @qc: Command to be associated
2911 * @buf: Memory buffer
2912 * @buflen: Length of memory buffer, in bytes.
2913 *
2914 * Initialize the data-related elements of queued_cmd @qc
2915 * to point to a single memory buffer, @buf of byte length @buflen.
2916 *
2917 * LOCKING:
2918 * spin_lock_irqsave(host_set lock)
2919 */
2920
2921 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2922 {
2923 struct scatterlist *sg;
2924
2925 qc->flags |= ATA_QCFLAG_SINGLE;
2926
2927 memset(&qc->sgent, 0, sizeof(qc->sgent));
2928 qc->__sg = &qc->sgent;
2929 qc->n_elem = 1;
2930 qc->orig_n_elem = 1;
2931 qc->buf_virt = buf;
2932
2933 sg = qc->__sg;
2934 sg_init_one(sg, buf, buflen);
2935 }
2936
2937 /**
2938 * ata_sg_init - Associate command with scatter-gather table.
2939 * @qc: Command to be associated
2940 * @sg: Scatter-gather table.
2941 * @n_elem: Number of elements in s/g table.
2942 *
2943 * Initialize the data-related elements of queued_cmd @qc
2944 * to point to a scatter-gather table @sg, containing @n_elem
2945 * elements.
2946 *
2947 * LOCKING:
2948 * spin_lock_irqsave(host_set lock)
2949 */
2950
2951 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2952 unsigned int n_elem)
2953 {
2954 qc->flags |= ATA_QCFLAG_SG;
2955 qc->__sg = sg;
2956 qc->n_elem = n_elem;
2957 qc->orig_n_elem = n_elem;
2958 }
2959
2960 /**
2961 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2962 * @qc: Command with memory buffer to be mapped.
2963 *
2964 * DMA-map the memory buffer associated with queued_cmd @qc.
2965 *
2966 * LOCKING:
2967 * spin_lock_irqsave(host_set lock)
2968 *
2969 * RETURNS:
2970 * Zero on success, negative on error.
2971 */
2972
2973 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2974 {
2975 struct ata_port *ap = qc->ap;
2976 int dir = qc->dma_dir;
2977 struct scatterlist *sg = qc->__sg;
2978 dma_addr_t dma_address;
2979 int trim_sg = 0;
2980
2981 /* we must lengthen transfers to end on a 32-bit boundary */
2982 qc->pad_len = sg->length & 3;
2983 if (qc->pad_len) {
2984 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2985 struct scatterlist *psg = &qc->pad_sgent;
2986
2987 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2988
2989 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2990
2991 if (qc->tf.flags & ATA_TFLAG_WRITE)
2992 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2993 qc->pad_len);
2994
2995 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2996 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2997 /* trim sg */
2998 sg->length -= qc->pad_len;
2999 if (sg->length == 0)
3000 trim_sg = 1;
3001
3002 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3003 sg->length, qc->pad_len);
3004 }
3005
3006 if (trim_sg) {
3007 qc->n_elem--;
3008 goto skip_map;
3009 }
3010
3011 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3012 sg->length, dir);
3013 if (dma_mapping_error(dma_address)) {
3014 /* restore sg */
3015 sg->length += qc->pad_len;
3016 return -1;
3017 }
3018
3019 sg_dma_address(sg) = dma_address;
3020 sg_dma_len(sg) = sg->length;
3021
3022 skip_map:
3023 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3024 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3025
3026 return 0;
3027 }
3028
3029 /**
3030 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3031 * @qc: Command with scatter-gather table to be mapped.
3032 *
3033 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3034 *
3035 * LOCKING:
3036 * spin_lock_irqsave(host_set lock)
3037 *
3038 * RETURNS:
3039 * Zero on success, negative on error.
3040 *
3041 */
3042
3043 static int ata_sg_setup(struct ata_queued_cmd *qc)
3044 {
3045 struct ata_port *ap = qc->ap;
3046 struct scatterlist *sg = qc->__sg;
3047 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3048 int n_elem, pre_n_elem, dir, trim_sg = 0;
3049
3050 VPRINTK("ENTER, ata%u\n", ap->id);
3051 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3052
3053 /* we must lengthen transfers to end on a 32-bit boundary */
3054 qc->pad_len = lsg->length & 3;
3055 if (qc->pad_len) {
3056 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3057 struct scatterlist *psg = &qc->pad_sgent;
3058 unsigned int offset;
3059
3060 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3061
3062 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3063
3064 /*
3065 * psg->page/offset are used to copy to-be-written
3066 * data in this function or read data in ata_sg_clean.
3067 */
3068 offset = lsg->offset + lsg->length - qc->pad_len;
3069 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3070 psg->offset = offset_in_page(offset);
3071
3072 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3073 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3074 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3075 kunmap_atomic(addr, KM_IRQ0);
3076 }
3077
3078 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3079 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3080 /* trim last sg */
3081 lsg->length -= qc->pad_len;
3082 if (lsg->length == 0)
3083 trim_sg = 1;
3084
3085 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3086 qc->n_elem - 1, lsg->length, qc->pad_len);
3087 }
3088
3089 pre_n_elem = qc->n_elem;
3090 if (trim_sg && pre_n_elem)
3091 pre_n_elem--;
3092
3093 if (!pre_n_elem) {
3094 n_elem = 0;
3095 goto skip_map;
3096 }
3097
3098 dir = qc->dma_dir;
3099 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3100 if (n_elem < 1) {
3101 /* restore last sg */
3102 lsg->length += qc->pad_len;
3103 return -1;
3104 }
3105
3106 DPRINTK("%d sg elements mapped\n", n_elem);
3107
3108 skip_map:
3109 qc->n_elem = n_elem;
3110
3111 return 0;
3112 }
3113
3114 /**
3115 * ata_poll_qc_complete - turn irq back on and finish qc
3116 * @qc: Command to complete
3118 *
3119 * LOCKING:
3120 * None. (grabs host lock)
3121 */
3122
3123 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3124 {
3125 struct ata_port *ap = qc->ap;
3126 unsigned long flags;
3127
3128 spin_lock_irqsave(&ap->host_set->lock, flags);
3129 ata_irq_on(ap);
3130 ata_qc_complete(qc);
3131 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3132 }
3133
3134 /**
3135 * ata_pio_poll - poll using PIO, depending on current state
3136 * @ap: the target ata_port
3137 *
3138 * LOCKING:
3139 * None. (executing in kernel thread context)
3140 *
3141 * RETURNS:
3142 * timeout value to use
3143 */
3144
3145 static unsigned long ata_pio_poll(struct ata_port *ap)
3146 {
3147 struct ata_queued_cmd *qc;
3148 u8 status;
3149 unsigned int poll_state = HSM_ST_UNKNOWN;
3150 unsigned int reg_state = HSM_ST_UNKNOWN;
3151
3152 qc = ata_qc_from_tag(ap, ap->active_tag);
3153 WARN_ON(qc == NULL);
3154
3155 switch (ap->hsm_task_state) {
3156 case HSM_ST:
3157 case HSM_ST_POLL:
3158 poll_state = HSM_ST_POLL;
3159 reg_state = HSM_ST;
3160 break;
3161 case HSM_ST_LAST:
3162 case HSM_ST_LAST_POLL:
3163 poll_state = HSM_ST_LAST_POLL;
3164 reg_state = HSM_ST_LAST;
3165 break;
3166 default:
3167 BUG();
3168 break;
3169 }
3170
3171 status = ata_chk_status(ap);
3172 if (status & ATA_BUSY) {
3173 if (time_after(jiffies, ap->pio_task_timeout)) {
3174 qc->err_mask |= AC_ERR_TIMEOUT;
3175 ap->hsm_task_state = HSM_ST_TMOUT;
3176 return 0;
3177 }
3178 ap->hsm_task_state = poll_state;
3179 return ATA_SHORT_PAUSE;
3180 }
3181
3182 ap->hsm_task_state = reg_state;
3183 return 0;
3184 }
3185
3186 /**
3187 * ata_pio_complete - check if drive is busy or idle
3188 * @ap: the target ata_port
3189 *
3190 * LOCKING:
3191 * None. (executing in kernel thread context)
3192 *
3193 * RETURNS:
3194 * Zero if qc completed.
3195 * Non-zero if there is more to do.
3196 */
3197
3198 static int ata_pio_complete (struct ata_port *ap)
3199 {
3200 struct ata_queued_cmd *qc;
3201 u8 drv_stat;
3202
3203 /*
3204 * This is purely heuristic. This is a fast path. Sometimes when
3205 * we enter, BSY will be cleared in a chk-status or two. If not,
3206 * the drive is probably seeking or something. Snooze for a couple
3207 * msecs, then chk-status again. If still busy, fall back to
3208 * HSM_ST_LAST_POLL state.
3209 */
3210 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3211 if (drv_stat & ATA_BUSY) {
3212 msleep(2);
3213 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3214 if (drv_stat & ATA_BUSY) {
3215 ap->hsm_task_state = HSM_ST_LAST_POLL;
3216 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3217 return 1;
3218 }
3219 }
3220
3221 qc = ata_qc_from_tag(ap, ap->active_tag);
3222 WARN_ON(qc == NULL);
3223
3224 drv_stat = ata_wait_idle(ap);
3225 if (!ata_ok(drv_stat)) {
3226 qc->err_mask |= __ac_err_mask(drv_stat);
3227 ap->hsm_task_state = HSM_ST_ERR;
3228 return 1;
3229 }
3230
3231 ap->hsm_task_state = HSM_ST_IDLE;
3232
3233 WARN_ON(qc->err_mask);
3234 ata_poll_qc_complete(qc);
3235
3236 /* another command may start at this point */
3237
3238 return 0;
3239 }
3240
3241
3242 /**
3243 * swap_buf_le16 - swap halves of 16-bit words in place
3244 * @buf: Buffer to swap
3245 * @buf_words: Number of 16-bit words in buffer.
3246 *
3247 * Swap halves of 16-bit words if needed to convert from
3248 * little-endian byte order to native cpu byte order, or
3249 * vice-versa.
3250 *
3251 * LOCKING:
3252 * Inherited from caller.
3253 */
3254 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3255 {
3256 #ifdef __BIG_ENDIAN
3257 unsigned int i;
3258
3259 for (i = 0; i < buf_words; i++)
3260 buf[i] = le16_to_cpu(buf[i]);
3261 #endif /* __BIG_ENDIAN */
3262 }
3263
3264 /**
3265 * ata_mmio_data_xfer - Transfer data by MMIO
3266 * @ap: port to read/write
3267 * @buf: data buffer
3268 * @buflen: buffer length
3269 * @write_data: read/write
3270 *
3271 * Transfer data from/to the device data register by MMIO.
3272 *
3273 * LOCKING:
3274 * Inherited from caller.
3275 */
3276
3277 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3278 unsigned int buflen, int write_data)
3279 {
3280 unsigned int i;
3281 unsigned int words = buflen >> 1;
3282 u16 *buf16 = (u16 *) buf;
3283 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3284
3285 /* Transfer multiple of 2 bytes */
3286 if (write_data) {
3287 for (i = 0; i < words; i++)
3288 writew(le16_to_cpu(buf16[i]), mmio);
3289 } else {
3290 for (i = 0; i < words; i++)
3291 buf16[i] = cpu_to_le16(readw(mmio));
3292 }
3293
3294 /* Transfer trailing 1 byte, if any. */
3295 if (unlikely(buflen & 0x01)) {
3296 u16 align_buf[1] = { 0 };
3297 unsigned char *trailing_buf = buf + buflen - 1;
3298
3299 if (write_data) {
3300 memcpy(align_buf, trailing_buf, 1);
3301 writew(le16_to_cpu(align_buf[0]), mmio);
3302 } else {
3303 align_buf[0] = cpu_to_le16(readw(mmio));
3304 memcpy(trailing_buf, align_buf, 1);
3305 }
3306 }
3307 }
3308
3309 /**
3310 * ata_pio_data_xfer - Transfer data by PIO
3311 * @ap: port to read/write
3312 * @buf: data buffer
3313 * @buflen: buffer length
3314 * @write_data: read/write
3315 *
3316 * Transfer data from/to the device data register by PIO.
3317 *
3318 * LOCKING:
3319 * Inherited from caller.
3320 */
3321
3322 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3323 unsigned int buflen, int write_data)
3324 {
3325 unsigned int words = buflen >> 1;
3326
3327 /* Transfer multiple of 2 bytes */
3328 if (write_data)
3329 outsw(ap->ioaddr.data_addr, buf, words);
3330 else
3331 insw(ap->ioaddr.data_addr, buf, words);
3332
3333 /* Transfer trailing 1 byte, if any. */
3334 if (unlikely(buflen & 0x01)) {
3335 u16 align_buf[1] = { 0 };
3336 unsigned char *trailing_buf = buf + buflen - 1;
3337
3338 if (write_data) {
3339 memcpy(align_buf, trailing_buf, 1);
3340 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3341 } else {
3342 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3343 memcpy(trailing_buf, align_buf, 1);
3344 }
3345 }
3346 }
3347
3348 /**
3349 * ata_data_xfer - Transfer data from/to the data register.
3350 * @ap: port to read/write
3351 * @buf: data buffer
3352 * @buflen: buffer length
3353 * @do_write: read/write
3354 *
3355 * Transfer data from/to the device data register.
3356 *
3357 * LOCKING:
3358 * Inherited from caller.
3359 */
3360
3361 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3362 unsigned int buflen, int do_write)
3363 {
3364 /* Make the crap hardware pay the costs not the good stuff */
3365 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3366 unsigned long flags;
3367 local_irq_save(flags);
3368 if (ap->flags & ATA_FLAG_MMIO)
3369 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3370 else
3371 ata_pio_data_xfer(ap, buf, buflen, do_write);
3372 local_irq_restore(flags);
3373 } else {
3374 if (ap->flags & ATA_FLAG_MMIO)
3375 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3376 else
3377 ata_pio_data_xfer(ap, buf, buflen, do_write);
3378 }
3379 }
3380
3381 /**
3382 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3383 * @qc: Command on going
3384 *
3385 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3386 *
3387 * LOCKING:
3388 * Inherited from caller.
3389 */
3390
3391 static void ata_pio_sector(struct ata_queued_cmd *qc)
3392 {
3393 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3394 struct scatterlist *sg = qc->__sg;
3395 struct ata_port *ap = qc->ap;
3396 struct page *page;
3397 unsigned int offset;
3398 unsigned char *buf;
3399
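	/* transferring the final sector: the next HSM step completes the command */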
3400 if (qc->cursect == (qc->nsect - 1))
3401 ap->hsm_task_state = HSM_ST_LAST;
3402
3403 page = sg[qc->cursg].page;
3404 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3405
3406 /* get the current page and offset */
3407 page = nth_page(page, (offset >> PAGE_SHIFT));
3408 offset %= PAGE_SIZE;
3409
3410 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3411
3412 if (PageHighMem(page)) {
3413 unsigned long flags;
3414
3415 local_irq_save(flags);
3416 buf = kmap_atomic(page, KM_IRQ0);
3417
3418 /* do the actual data transfer */
3419 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3420
3421 kunmap_atomic(buf, KM_IRQ0);
3422 local_irq_restore(flags);
3423 } else {
3424 buf = page_address(page);
3425 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3426 }
3427
3428 qc->cursect++;
3429 qc->cursg_ofs++;
3430
3431 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3432 qc->cursg++;
3433 qc->cursg_ofs = 0;
3434 }
3435 }
3436
3437 /**
3438 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3439 * @qc: Command on going
3440 *
3441 * Transfer one or many ATA_SECT_SIZE of data from/to the
3442 * ATA device for the DRQ request.
3443 *
3444 * LOCKING:
3445 * Inherited from caller.
3446 */
3447
3448 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3449 {
3450 if (is_multi_taskfile(&qc->tf)) {
3451 /* READ/WRITE MULTIPLE */
3452 unsigned int nsect;
3453
3454 WARN_ON(qc->dev->multi_count == 0);
3455
3456 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3457 while (nsect--)
3458 ata_pio_sector(qc);
3459 } else
3460 ata_pio_sector(qc);
3461 }
3462
3463 /**
3464 * atapi_send_cdb - Write CDB bytes to hardware
3465 * @ap: Port to which ATAPI device is attached.
3466 * @qc: Taskfile currently active
3467 *
3468 * When device has indicated its readiness to accept
3469 * a CDB, this function is called. Send the CDB.
3470 *
3471 * LOCKING:
3472 * caller.
3473 */
3474
3475 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3476 {
3477 /* send SCSI cdb */
3478 DPRINTK("send cdb\n");
3479 WARN_ON(qc->dev->cdb_len < 12);
3480
3481 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3482 ata_altstatus(ap); /* flush */
3483
3484 switch (qc->tf.protocol) {
3485 case ATA_PROT_ATAPI:
3486 ap->hsm_task_state = HSM_ST;
3487 break;
3488 case ATA_PROT_ATAPI_NODATA:
3489 ap->hsm_task_state = HSM_ST_LAST;
3490 break;
3491 case ATA_PROT_ATAPI_DMA:
3492 ap->hsm_task_state = HSM_ST_LAST;
3493 /* initiate bmdma */
3494 ap->ops->bmdma_start(qc);
3495 break;
3496 }
3497 }
3498
3499 /**
3500 * ata_pio_first_block - Write first data block to hardware
3501 * @ap: Port to which ATA/ATAPI device is attached.
3502 *
3503 * When device has indicated its readiness to accept
3504 * the data, this function sends out the CDB or
3505 * the first data block by PIO.
3506 * After this,
3507 * - If polling, ata_pio_task() handles the rest.
3508 * - Otherwise, interrupt handler takes over.
3509 *
3510 * LOCKING:
3511 * Kernel thread context (may sleep)
3512 *
3513 * RETURNS:
3514 * Zero if the irq handler takes over from here.
3515 * Non-zero if there is more to do (polling).
3516 */
3517
3518 static int ata_pio_first_block(struct ata_port *ap)
3519 {
3520 struct ata_queued_cmd *qc;
3521 u8 status;
3522 unsigned long flags;
3523 int has_next;
3524
3525 qc = ata_qc_from_tag(ap, ap->active_tag);
3526 WARN_ON(qc == NULL);
3527 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3528
3529 /* if polling, we will stay in the work queue after sending the data.
3530 * otherwise, interrupt handler takes over after sending the data.
3531 */
3532 has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3533
3534 /* sleep-wait for BSY to clear */
3535 DPRINTK("busy wait\n");
3536 if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
3537 qc->err_mask |= AC_ERR_TIMEOUT;
3538 ap->hsm_task_state = HSM_ST_TMOUT;
3539 goto err_out;
3540 }
3541
3542 /* make sure DRQ is set */
3543 status = ata_chk_status(ap);
3544 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3545 /* device status error */
3546 qc->err_mask |= AC_ERR_HSM;
3547 ap->hsm_task_state = HSM_ST_ERR;
3548 goto err_out;
3549 }
3550
3551 /* Send the CDB (atapi) or the first data block (ata pio out).
3552 * During the state transition, interrupt handler shouldn't
3553 * be invoked before the data transfer is complete and
3554 * hsm_task_state is changed. Hence, the following locking.
3555 */
3556 spin_lock_irqsave(&ap->host_set->lock, flags);
3557
3558 if (qc->tf.protocol == ATA_PROT_PIO) {
3559 /* PIO data out protocol.
3560 * send first data block.
3561 */
3562
3563 /* ata_pio_sectors() might change the state to HSM_ST_LAST.
3564 * so, the state is changed here before ata_pio_sectors().
3565 */
3566 ap->hsm_task_state = HSM_ST;
3567 ata_pio_sectors(qc);
3568 ata_altstatus(ap); /* flush */
3569 } else
3570 /* send CDB */
3571 atapi_send_cdb(ap, qc);
3572
3573 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3574
3575 /* if polling, ata_pio_task() handles the rest.
3576 * otherwise, interrupt handler takes over from here.
3577 */
3578 return has_next;
3579
3580 err_out:
3581 return 1; /* has next */
3582 }
3583
3584 /**
3585 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3586 * @qc: Command on going
3587 * @bytes: number of bytes
3588 *
3589 * Transfer data from/to the ATAPI device.
3590 *
3591 * LOCKING:
3592 * Inherited from caller.
3593 *
3594 */
3595
3596 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3597 {
3598 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3599 struct scatterlist *sg = qc->__sg;
3600 struct ata_port *ap = qc->ap;
3601 struct page *page;
3602 unsigned char *buf;
3603 unsigned int offset, count;
3604
3605 if (qc->curbytes + bytes >= qc->nbytes)
3606 ap->hsm_task_state = HSM_ST_LAST;
3607
3608 next_sg:
3609 if (unlikely(qc->cursg >= qc->n_elem)) {
3610 /*
3611 * The end of qc->sg is reached and the device expects
3612 * more data to transfer. In order not to overrun qc->sg
3613 * while still satisfying the length specified in the byte count register,
3614 * - for the read case, discard the trailing data from the device
3615 * - for the write case, pad with zero data to the device
3616 */
3617 u16 pad_buf[1] = { 0 };
3618 unsigned int words = bytes >> 1;
3619 unsigned int i;
3620
3621 if (words) /* warning if bytes > 1 */
3622 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3623 ap->id, bytes);
3624
3625 for (i = 0; i < words; i++)
3626 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3627
3628 ap->hsm_task_state = HSM_ST_LAST;
3629 return;
3630 }
3631
3632 sg = &qc->__sg[qc->cursg];
3633
3634 page = sg->page;
3635 offset = sg->offset + qc->cursg_ofs;
3636
3637 /* get the current page and offset */
3638 page = nth_page(page, (offset >> PAGE_SHIFT));
3639 offset %= PAGE_SIZE;
3640
3641 /* don't overrun current sg */
3642 count = min(sg->length - qc->cursg_ofs, bytes);
3643
3644 /* don't cross page boundaries */
3645 count = min(count, (unsigned int)PAGE_SIZE - offset);
3646
3647 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3648
3649 if (PageHighMem(page)) {
3650 unsigned long flags;
3651
3652 local_irq_save(flags);
3653 buf = kmap_atomic(page, KM_IRQ0);
3654
3655 /* do the actual data transfer */
3656 ata_data_xfer(ap, buf + offset, count, do_write);
3657
3658 kunmap_atomic(buf, KM_IRQ0);
3659 local_irq_restore(flags);
3660 } else {
3661 buf = page_address(page);
3662 ata_data_xfer(ap, buf + offset, count, do_write);
3663 }
3664
3665 bytes -= count;
3666 qc->curbytes += count;
3667 qc->cursg_ofs += count;
3668
3669 if (qc->cursg_ofs == sg->length) {
3670 qc->cursg++;
3671 qc->cursg_ofs = 0;
3672 }
3673
3674 if (bytes)
3675 goto next_sg;
3676 }
3677
3678 /**
3679 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3680 * @qc: Command on going
3681 *
3682 * Transfer data from/to the ATAPI device.
3683 *
3684 * LOCKING:
3685 * Inherited from caller.
3686 */
3687
3688 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3689 {
3690 struct ata_port *ap = qc->ap;
3691 struct ata_device *dev = qc->dev;
3692 unsigned int ireason, bc_lo, bc_hi, bytes;
3693 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3694
3695 ap->ops->tf_read(ap, &qc->tf);
3696 ireason = qc->tf.nsect;
3697 bc_lo = qc->tf.lbam;
3698 bc_hi = qc->tf.lbah;
3699 bytes = (bc_hi << 8) | bc_lo;
3700
3701 /* shall be cleared to zero, indicating xfer of data */
3702 if (ireason & (1 << 0))
3703 goto err_out;
3704
3705 /* make sure transfer direction matches expected */
3706 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3707 if (do_write != i_write)
3708 goto err_out;
3709
3710 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3711
3712 __atapi_pio_bytes(qc, bytes);
3713
3714 return;
3715
3716 err_out:
3717 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3718 ap->id, dev->devno);
3719 qc->err_mask |= AC_ERR_HSM;
3720 ap->hsm_task_state = HSM_ST_ERR;
3721 }
3722
3723 /**
3724 * ata_pio_block - start PIO on a block
3725 * @ap: the target ata_port
3726 *
3727 * LOCKING:
3728 * None. (executing in kernel thread context)
3729 */
3730
3731 static void ata_pio_block(struct ata_port *ap)
3732 {
3733 struct ata_queued_cmd *qc;
3734 u8 status;
3735
3736 /*
3737 * This is purely heuristic. This is a fast path.
3738 * Sometimes when we enter, BSY will be cleared in
3739 * a chk-status or two. If not, the drive is probably seeking
3740 * or something. Snooze for a couple msecs, then
3741 * chk-status again. If still busy, fall back to
3742 * HSM_ST_POLL state.
3743 */
3744 status = ata_busy_wait(ap, ATA_BUSY, 5);
3745 if (status & ATA_BUSY) {
3746 msleep(2);
3747 status = ata_busy_wait(ap, ATA_BUSY, 10);
3748 if (status & ATA_BUSY) {
3749 ap->hsm_task_state = HSM_ST_POLL;
3750 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3751 return;
3752 }
3753 }
3754
3755 qc = ata_qc_from_tag(ap, ap->active_tag);
3756 WARN_ON(qc == NULL);
3757
3758 /* check error */
3759 if (status & (ATA_ERR | ATA_DF)) {
3760 qc->err_mask |= AC_ERR_DEV;
3761 ap->hsm_task_state = HSM_ST_ERR;
3762 return;
3763 }
3764
3765 /* transfer data if any */
3766 if (is_atapi_taskfile(&qc->tf)) {
3767 /* DRQ=0 means no more data to transfer */
3768 if ((status & ATA_DRQ) == 0) {
3769 ap->hsm_task_state = HSM_ST_LAST;
3770 return;
3771 }
3772
3773 atapi_pio_bytes(qc);
3774 } else {
3775 /* handle BSY=0, DRQ=0 as error */
3776 if ((status & ATA_DRQ) == 0) {
3777 qc->err_mask |= AC_ERR_HSM;
3778 ap->hsm_task_state = HSM_ST_ERR;
3779 return;
3780 }
3781
3782 ata_pio_sectors(qc);
3783 }
3784
3785 ata_altstatus(ap); /* flush */
3786 }
3787
3788 static void ata_pio_error(struct ata_port *ap)
3789 {
3790 struct ata_queued_cmd *qc;
3791
3792 qc = ata_qc_from_tag(ap, ap->active_tag);
3793 WARN_ON(qc == NULL);
3794
3795 if (qc->tf.command != ATA_CMD_PACKET)
3796 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3797
3798 /* make sure qc->err_mask is available to
3799 * know what's wrong and recover
3800 */
3801 WARN_ON(qc->err_mask == 0);
3802
3803 ap->hsm_task_state = HSM_ST_IDLE;
3804
3805 ata_poll_qc_complete(qc);
3806 }
3807
3808 static void ata_pio_task(void *_data)
3809 {
3810 struct ata_port *ap = _data;
3811 unsigned long timeout;
3812 int has_next;
3813
3814 fsm_start:
3815 timeout = 0;
3816 has_next = 1;
3817
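	/* dispatch on the current host state machine (HSM) state */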
3818 switch (ap->hsm_task_state) {
3819 case HSM_ST_FIRST:
3820 has_next = ata_pio_first_block(ap);
3821 break;
3822
3823 case HSM_ST:
3824 ata_pio_block(ap);
3825 break;
3826
3827 case HSM_ST_LAST:
3828 has_next = ata_pio_complete(ap);
3829 break;
3830
3831 case HSM_ST_POLL:
3832 case HSM_ST_LAST_POLL:
3833 timeout = ata_pio_poll(ap);
3834 break;
3835
3836 case HSM_ST_TMOUT:
3837 case HSM_ST_ERR:
3838 ata_pio_error(ap);
3839 return;
3840
3841 default:
3842 BUG();
3843 return;
3844 }
3845
3846 if (timeout)
3847 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3848 else if (has_next)
3849 goto fsm_start;
3850 }
3851
3852 /**
3853 * ata_qc_timeout - Handle timeout of queued command
3854 * @qc: Command that timed out
3855 *
3856 * Some part of the kernel (currently, only the SCSI layer)
3857 * has noticed that the active command on port @ap has not
3858 * completed after a specified length of time. Handle this
3859 * condition by disabling DMA (if necessary) and completing
3860 * transactions, with error if necessary.
3861 *
3862 * This also handles the case of the "lost interrupt", where
3863 * for some reason (possibly hardware bug, possibly driver bug)
3864 * an interrupt was not delivered to the driver, even though the
3865 * transaction completed successfully.
3866 *
3867 * LOCKING:
3868 * Inherited from SCSI layer (none, can sleep)
3869 */
3870
3871 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3872 {
3873 struct ata_port *ap = qc->ap;
3874 struct ata_host_set *host_set = ap->host_set;
3875 u8 host_stat = 0, drv_stat;
3876 unsigned long flags;
3877
3878 DPRINTK("ENTER\n");
3879
3880 ap->hsm_task_state = HSM_ST_IDLE;
3881
3882 spin_lock_irqsave(&host_set->lock, flags);
3883
3884 switch (qc->tf.protocol) {
3885
3886 case ATA_PROT_DMA:
3887 case ATA_PROT_ATAPI_DMA:
3888 host_stat = ap->ops->bmdma_status(ap);
3889
3890 /* before we do anything else, clear DMA-Start bit */
3891 ap->ops->bmdma_stop(qc);
3892
3893 /* fall through */
3894
3895 default:
3896 ata_altstatus(ap);
3897 drv_stat = ata_chk_status(ap);
3898
3899 /* ack bmdma irq events */
3900 ap->ops->irq_clear(ap);
3901
3902 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3903 ap->id, qc->tf.command, drv_stat, host_stat);
3904
3905 ap->hsm_task_state = HSM_ST_IDLE;
3906
3907 /* complete taskfile transaction */
3908 qc->err_mask |= AC_ERR_TIMEOUT;
3909 break;
3910 }
3911
3912 spin_unlock_irqrestore(&host_set->lock, flags);
3913
3914 ata_eh_qc_complete(qc);
3915
3916 DPRINTK("EXIT\n");
3917 }
3918
3919 /**
3920 * ata_eng_timeout - Handle timeout of queued command
3921 * @ap: Port on which timed-out command is active
3922 *
3923 * Some part of the kernel (currently, only the SCSI layer)
3924 * has noticed that the active command on port @ap has not
3925 * completed after a specified length of time. Handle this
3926 * condition by disabling DMA (if necessary) and completing
3927 * transactions, with error if necessary.
3928 *
3929 * This also handles the case of the "lost interrupt", where
3930 * for some reason (possibly hardware bug, possibly driver bug)
3931 * an interrupt was not delivered to the driver, even though the
3932 * transaction completed successfully.
3933 *
3934 * LOCKING:
3935 * Inherited from SCSI layer (none, can sleep)
3936 */
3937
3938 void ata_eng_timeout(struct ata_port *ap)
3939 {
3940 DPRINTK("ENTER\n");
3941
3942 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3943
3944 DPRINTK("EXIT\n");
3945 }
3946
3947 /**
3948 * ata_qc_new - Request an available ATA command, for queueing
3949 * @ap: Port from which we request an available command structure
3951 *
3952 * LOCKING:
3953 * None.
3954 */
3955
3956 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3957 {
3958 struct ata_queued_cmd *qc = NULL;
3959 unsigned int i;
3960
3961 for (i = 0; i < ATA_MAX_QUEUE; i++)
3962 if (!test_and_set_bit(i, &ap->qactive)) {
3963 qc = ata_qc_from_tag(ap, i);
3964 break;
3965 }
3966
3967 if (qc)
3968 qc->tag = i;
3969
3970 return qc;
3971 }
3972
3973 /**
3974 * ata_qc_new_init - Request an available ATA command, and initialize it
3975 * @ap: Port associated with device @dev
3976 * @dev: Device from whom we request an available command structure
3977 *
3978 * LOCKING:
3979 * None.
3980 */
3981
3982 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3983 struct ata_device *dev)
3984 {
3985 struct ata_queued_cmd *qc;
3986
3987 qc = ata_qc_new(ap);
3988 if (qc) {
3989 qc->scsicmd = NULL;
3990 qc->ap = ap;
3991 qc->dev = dev;
3992
3993 ata_qc_reinit(qc);
3994 }
3995
3996 return qc;
3997 }
3998
3999 /**
4000 * ata_qc_free - free unused ata_queued_cmd
4001 * @qc: Command to complete
4002 *
4003 * Designed to free unused ata_queued_cmd object
4004 * in case something prevents using it.
4005 *
4006 * LOCKING:
4007 * spin_lock_irqsave(host_set lock)
4008 */
4009 void ata_qc_free(struct ata_queued_cmd *qc)
4010 {
4011 struct ata_port *ap = qc->ap;
4012 unsigned int tag;
4013
4014 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4015
4016 qc->flags = 0;
4017 tag = qc->tag;
4018 if (likely(ata_tag_valid(tag))) {
4019 if (tag == ap->active_tag)
4020 ap->active_tag = ATA_TAG_POISON;
4021 qc->tag = ATA_TAG_POISON;
4022 clear_bit(tag, &ap->qactive);
4023 }
4024 }
4025
4026 void __ata_qc_complete(struct ata_queued_cmd *qc)
4027 {
4028 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4029 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4030
4031 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4032 ata_sg_clean(qc);
4033
4034 /* atapi: mark qc as inactive to prevent the interrupt handler
4035 * from completing the command twice later, before the error handler
4036 * is called. (when rc != 0 and atapi request sense is needed)
4037 */
4038 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4039
4040 /* call completion callback */
4041 qc->complete_fn(qc);
4042 }
4043
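/* decide whether qc's protocol requires its data buffer to be DMA-mapped;
 * PIO/ATAPI still map when the port performs PIO via DMA (ATA_FLAG_PIO_DMA)
 */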
4044 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4045 {
4046 struct ata_port *ap = qc->ap;
4047
4048 switch (qc->tf.protocol) {
4049 case ATA_PROT_DMA:
4050 case ATA_PROT_ATAPI_DMA:
4051 return 1;
4052
4053 case ATA_PROT_ATAPI:
4054 case ATA_PROT_PIO:
4055 if (ap->flags & ATA_FLAG_PIO_DMA)
4056 return 1;
4057
4058 /* fall through */
4059
4060 default:
4061 return 0;
4062 }
4063
4064 /* never reached */
4065 }
4066
4067 /**
4068 * ata_qc_issue - issue taskfile to device
4069 * @qc: command to issue to device
4070 *
4071 * Prepare an ATA command for submission to the device.
4072 * This includes mapping the data into a DMA-able
4073 * area, filling in the S/G table, and finally
4074 * writing the taskfile to hardware, starting the command.
4075 *
4076 * LOCKING:
4077 * spin_lock_irqsave(host_set lock)
4078 *
4079 * RETURNS:
4080 * Zero on success, AC_ERR_* mask on failure
4081 */
4082
4083 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4084 {
4085 struct ata_port *ap = qc->ap;
4086
4087 if (ata_should_dma_map(qc)) {
4088 if (qc->flags & ATA_QCFLAG_SG) {
4089 if (ata_sg_setup(qc))
4090 goto sg_err;
4091 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4092 if (ata_sg_setup_one(qc))
4093 goto sg_err;
4094 }
4095 } else {
4096 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4097 }
4098
4099 ap->ops->qc_prep(qc);
4100
4101 qc->ap->active_tag = qc->tag;
4102 qc->flags |= ATA_QCFLAG_ACTIVE;
4103
4104 return ap->ops->qc_issue(qc);
4105
4106 sg_err:
4107 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4108 return AC_ERR_SYSTEM;
4109 }
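/*
 * Editorial sketch (not part of the original file): the usual call
 * sequence around ata_qc_issue().  example_complete() and
 * example_issue_nodata() are hypothetical names; the caller is assumed
 * to hold the host_set lock, as required by the LOCKING note above.
 */
static void example_complete(struct ata_queued_cmd *qc)
{
	/* inspect qc->err_mask here, then release the command */
	ata_qc_free(qc);
}

static unsigned int example_issue_nodata(struct ata_port *ap,
					 struct ata_device *dev, u8 command)
{
	struct ata_queued_cmd *qc;
	unsigned int err_mask;

	qc = ata_qc_new_init(ap, dev);		/* allocate and init a qc */
	if (!qc)
		return AC_ERR_SYSTEM;

	ata_tf_init(ap, &qc->tf, dev->devno);	/* build a NODATA taskfile */
	qc->tf.command  = command;
	qc->tf.flags   |= ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->complete_fn = example_complete;	/* called on completion */

	err_mask = ata_qc_issue(qc);		/* map, prep and start */
	if (err_mask)
		ata_qc_free(qc);		/* issue failed, reclaim the tag */
	return err_mask;
}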
4110
4111
4112 /**
4113 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4114 * @qc: command to issue to device
4115 *
4116 * Using various libata functions and hooks, this function
4117 * starts an ATA command. ATA commands are grouped into
4118 * classes called "protocols", and issuing each type of protocol
4119 * is slightly different.
4120 *
4121 * May be used as the qc_issue() entry in ata_port_operations.
4122 *
4123 * LOCKING:
4124 * spin_lock_irqsave(host_set lock)
4125 *
4126 * RETURNS:
4127 * Zero on success, AC_ERR_* mask on failure
4128 */
4129
4130 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4131 {
4132 struct ata_port *ap = qc->ap;
4133
4134 /* Use polling PIO if the LLD doesn't handle
4135 * interrupt-driven PIO and the ATAPI CDB interrupt.
4136 */
4137 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4138 switch (qc->tf.protocol) {
4139 case ATA_PROT_PIO:
4140 case ATA_PROT_ATAPI:
4141 case ATA_PROT_ATAPI_NODATA:
4142 qc->tf.flags |= ATA_TFLAG_POLLING;
4143 break;
4144 case ATA_PROT_ATAPI_DMA:
4145 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4146 BUG();
4147 break;
4148 default:
4149 break;
4150 }
4151 }
4152
4153 /* select the device */
4154 ata_dev_select(ap, qc->dev->devno, 1, 0);
4155
4156 /* start the command */
4157 switch (qc->tf.protocol) {
4158 case ATA_PROT_NODATA:
4159 if (qc->tf.flags & ATA_TFLAG_POLLING)
4160 ata_qc_set_polling(qc);
4161
4162 ata_tf_to_host(ap, &qc->tf);
4163 ap->hsm_task_state = HSM_ST_LAST;
4164
4165 if (qc->tf.flags & ATA_TFLAG_POLLING)
4166 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4167
4168 break;
4169
4170 case ATA_PROT_DMA:
4171 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4172
4173 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4174 ap->ops->bmdma_setup(qc); /* set up bmdma */
4175 ap->ops->bmdma_start(qc); /* initiate bmdma */
4176 ap->hsm_task_state = HSM_ST_LAST;
4177 break;
4178
4179 case ATA_PROT_PIO:
4180 if (qc->tf.flags & ATA_TFLAG_POLLING)
4181 ata_qc_set_polling(qc);
4182
4183 ata_tf_to_host(ap, &qc->tf);
4184
4185 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4186 /* PIO data out protocol */
4187 ap->hsm_task_state = HSM_ST_FIRST;
4188 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4189
4190 /* always send first data block using
4191 * the ata_pio_task() codepath.
4192 */
4193 } else {
4194 /* PIO data in protocol */
4195 ap->hsm_task_state = HSM_ST;
4196
4197 if (qc->tf.flags & ATA_TFLAG_POLLING)
4198 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4199
4200 /* if polling, ata_pio_task() handles the rest.
4201 * otherwise, interrupt handler takes over from here.
4202 */
4203 }
4204
4205 break;
4206
4207 case ATA_PROT_ATAPI:
4208 case ATA_PROT_ATAPI_NODATA:
4209 if (qc->tf.flags & ATA_TFLAG_POLLING)
4210 ata_qc_set_polling(qc);
4211
4212 ata_tf_to_host(ap, &qc->tf);
4213
4214 ap->hsm_task_state = HSM_ST_FIRST;
4215
4216 /* send cdb by polling if no cdb interrupt */
4217 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4218 (qc->tf.flags & ATA_TFLAG_POLLING))
4219 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4220 break;
4221
4222 case ATA_PROT_ATAPI_DMA:
4223 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4224
4225 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4226 ap->ops->bmdma_setup(qc); /* set up bmdma */
4227 ap->hsm_task_state = HSM_ST_FIRST;
4228
4229 /* send cdb by polling if no cdb interrupt */
4230 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4231 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4232 break;
4233
4234 default:
4235 WARN_ON(1);
4236 return AC_ERR_SYSTEM;
4237 }
4238
4239 return 0;
4240 }
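/*
 * Editorial sketch (not part of the original file): how a conventional
 * BMDMA-style LLD might plug the helpers exported by this file into
 * ata_port_operations.  "example_port_ops" is a hypothetical name, the
 * exact set of hooks varies per driver, and the field names are assumed
 * from contemporary in-tree drivers.
 */
static const struct ata_port_operations example_port_ops = {
	.port_disable	= ata_port_disable,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,	/* protocol-aware issue above */
	.eng_timeout	= ata_eng_timeout,
	.irq_handler	= ata_interrupt,	/* default handler below */
	.irq_clear	= ata_bmdma_irq_clear,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
	.host_stop	= ata_host_stop,
};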
4241
4242 /**
4243 * ata_host_intr - Handle host interrupt for given (port, task)
4244 * @ap: Port on which interrupt arrived (possibly...)
4245 * @qc: Taskfile currently active in engine
4246 *
4247 * Handle host interrupt for given queued command. Currently,
4248 * only DMA interrupts are handled. All other commands are
4249 * handled via polling with interrupts disabled (nIEN bit).
4250 *
4251 * LOCKING:
4252 * spin_lock_irqsave(host_set lock)
4253 *
4254 * RETURNS:
4255 * One if interrupt was handled, zero if not (shared irq).
4256 */
4257
4258 inline unsigned int ata_host_intr (struct ata_port *ap,
4259 struct ata_queued_cmd *qc)
4260 {
4261 u8 status, host_stat = 0;
4262
4263 VPRINTK("ata%u: protocol %d task_state %d\n",
4264 ap->id, qc->tf.protocol, ap->hsm_task_state);
4265
4266 /* Check whether we are expecting interrupt in this state */
4267 switch (ap->hsm_task_state) {
4268 case HSM_ST_FIRST:
4269 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4270 * The flag is set only for ATAPI devices, so there is
4271 * no need to check is_atapi_taskfile(&qc->tf) again.
4272 */
4273 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4274 goto idle_irq;
4275 break;
4276 case HSM_ST_LAST:
4277 if (qc->tf.protocol == ATA_PROT_DMA ||
4278 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4279 /* check status of DMA engine */
4280 host_stat = ap->ops->bmdma_status(ap);
4281 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4282
4283 /* if it's not our irq... */
4284 if (!(host_stat & ATA_DMA_INTR))
4285 goto idle_irq;
4286
4287 /* before we do anything else, clear DMA-Start bit */
4288 ap->ops->bmdma_stop(qc);
4289
4290 if (unlikely(host_stat & ATA_DMA_ERR)) {
4291 /* error when transferring data to/from memory */
4292 qc->err_mask |= AC_ERR_HOST_BUS;
4293 ap->hsm_task_state = HSM_ST_ERR;
4294 }
4295 }
4296 break;
4297 case HSM_ST:
4298 break;
4299 default:
4300 goto idle_irq;
4301 }
4302
4303 /* check altstatus */
4304 status = ata_altstatus(ap);
4305 if (status & ATA_BUSY)
4306 goto idle_irq;
4307
4308 /* check main status, clearing INTRQ */
4309 status = ata_chk_status(ap);
4310 if (unlikely(status & ATA_BUSY))
4311 goto idle_irq;
4312
4313 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4314 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4315
4316 /* ack bmdma irq events */
4317 ap->ops->irq_clear(ap);
4318
4319 /* check error */
4320 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4321 qc->err_mask |= AC_ERR_DEV;
4322 ap->hsm_task_state = HSM_ST_ERR;
4323 }
4324
4325 fsm_start:
4326 switch (ap->hsm_task_state) {
4327 case HSM_ST_FIRST:
4328 /* Some pre-ATAPI-4 devices assert INTRQ
4329 * in this state when ready to receive the CDB.
4330 */
4331
4332 /* check device status */
4333 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
4334 /* Wrong status. Let EH handle this */
4335 qc->err_mask |= AC_ERR_HSM;
4336 ap->hsm_task_state = HSM_ST_ERR;
4337 goto fsm_start;
4338 }
4339
4340 atapi_send_cdb(ap, qc);
4341
4342 break;
4343
4344 case HSM_ST:
4345 /* complete command or read/write the data register */
4346 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4347 /* ATAPI PIO protocol */
4348 if ((status & ATA_DRQ) == 0) {
4349 /* no more data to transfer */
4350 ap->hsm_task_state = HSM_ST_LAST;
4351 goto fsm_start;
4352 }
4353
4354 atapi_pio_bytes(qc);
4355
4356 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4357 /* bad ireason reported by device */
4358 goto fsm_start;
4359
4360 } else {
4361 /* ATA PIO protocol */
4362 if (unlikely((status & ATA_DRQ) == 0)) {
4363 /* handle BSY=0, DRQ=0 as error */
4364 qc->err_mask |= AC_ERR_HSM;
4365 ap->hsm_task_state = HSM_ST_ERR;
4366 goto fsm_start;
4367 }
4368
4369 ata_pio_sectors(qc);
4370
4371 if (ap->hsm_task_state == HSM_ST_LAST &&
4372 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4373 /* all data read */
4374 ata_altstatus(ap);
4375 status = ata_chk_status(ap);
4376 goto fsm_start;
4377 }
4378 }
4379
4380 ata_altstatus(ap); /* flush */
4381 break;
4382
4383 case HSM_ST_LAST:
4384 if (unlikely(status & ATA_DRQ)) {
4385 /* handle DRQ=1 as error */
4386 qc->err_mask |= AC_ERR_HSM;
4387 ap->hsm_task_state = HSM_ST_ERR;
4388 goto fsm_start;
4389 }
4390
4391 /* no more data to transfer */
4392 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
4393 ap->id, status);
4394
4395 ap->hsm_task_state = HSM_ST_IDLE;
4396
4397 /* complete taskfile transaction */
4398 qc->err_mask |= ac_err_mask(status);
4399 ata_qc_complete(qc);
4400 break;
4401
4402 case HSM_ST_ERR:
4403 if (qc->tf.command != ATA_CMD_PACKET)
4404 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
4405 ap->id, status, host_stat);
4406
4407 /* make sure qc->err_mask is available to
4408 * know what's wrong and recover
4409 */
4410 WARN_ON(qc->err_mask == 0);
4411
4412 ap->hsm_task_state = HSM_ST_IDLE;
4413 ata_qc_complete(qc);
4414 break;
4415 default:
4416 goto idle_irq;
4417 }
4418
4419 return 1; /* irq handled */
4420
4421 idle_irq:
4422 ap->stats.idle_irq++;
4423
4424 #ifdef ATA_IRQ_TRAP
4425 if ((ap->stats.idle_irq % 1000) == 0) {
4426 ata_irq_ack(ap, 0); /* debug trap */
4427 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4428 return 1;
4429 }
4430 #endif
4431 return 0; /* irq not handled */
4432 }
4433
4434 /**
4435 * ata_interrupt - Default ATA host interrupt handler
4436 * @irq: irq line (unused)
4437 * @dev_instance: pointer to our ata_host_set information structure
4438 * @regs: unused
4439 *
4440 * Default interrupt handler for PCI IDE devices. Calls
4441 * ata_host_intr() for each port that is not disabled.
4442 *
4443 * LOCKING:
4444 * Obtains host_set lock during operation.
4445 *
4446 * RETURNS:
4447 * IRQ_NONE or IRQ_HANDLED.
4448 */
4449
4450 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4451 {
4452 struct ata_host_set *host_set = dev_instance;
4453 unsigned int i;
4454 unsigned int handled = 0;
4455 unsigned long flags;
4456
4457 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4458 spin_lock_irqsave(&host_set->lock, flags);
4459
4460 for (i = 0; i < host_set->n_ports; i++) {
4461 struct ata_port *ap;
4462
4463 ap = host_set->ports[i];
4464 if (ap &&
4465 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
4466 struct ata_queued_cmd *qc;
4467
4468 qc = ata_qc_from_tag(ap, ap->active_tag);
4469 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4470 (qc->flags & ATA_QCFLAG_ACTIVE))
4471 handled |= ata_host_intr(ap, qc);
4472 }
4473 }
4474
4475 spin_unlock_irqrestore(&host_set->lock, flags);
4476
4477 return IRQ_RETVAL(handled);
4478 }
4479
4480
4481 /*
4482 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4483 * without filling any other registers
4484 */
4485 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4486 u8 cmd)
4487 {
4488 struct ata_taskfile tf;
4489 int err;
4490
4491 ata_tf_init(ap, &tf, dev->devno);
4492
4493 tf.command = cmd;
4494 tf.flags |= ATA_TFLAG_DEVICE;
4495 tf.protocol = ATA_PROT_NODATA;
4496
4497 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4498 if (err)
4499 printk(KERN_ERR "%s: ata command failed: %d\n",
4500 __FUNCTION__, err);
4501
4502 return err;
4503 }
4504
4505 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4506 {
4507 u8 cmd;
4508
4509 if (!ata_try_flush_cache(dev))
4510 return 0;
4511
4512 if (ata_id_has_flush_ext(dev->id))
4513 cmd = ATA_CMD_FLUSH_EXT;
4514 else
4515 cmd = ATA_CMD_FLUSH;
4516
4517 return ata_do_simple_cmd(ap, dev, cmd);
4518 }
4519
4520 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4521 {
4522 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4523 }
4524
4525 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4526 {
4527 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4528 }
4529
4530 /**
4531 * ata_device_resume - wake up a previously suspended device
4532 * @ap: port the device is connected to
4533 * @dev: the device to resume
4534 *
4535 * Kick the drive back into action by sending it an idle-immediate
4536 * command and making sure the transfer modes of the drive and the
4537 * host match.
4538 *
4539 */
4540 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4541 {
4542 if (ap->flags & ATA_FLAG_SUSPENDED) {
4543 ap->flags &= ~ATA_FLAG_SUSPENDED;
4544 ata_set_mode(ap);
4545 }
4546 if (!ata_dev_present(dev))
4547 return 0;
4548 if (dev->class == ATA_DEV_ATA)
4549 ata_start_drive(ap, dev);
4550
4551 return 0;
4552 }
4553
4554 /**
4555 * ata_device_suspend - prepare a device for suspend
4556 * @ap: port the device is connected to
4557 * @dev: the device to suspend
4558 *
4559 * Flush the cache on the drive, if appropriate, then issue a
4560 * standbynow command.
4561 */
4562 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4563 {
4564 if (!ata_dev_present(dev))
4565 return 0;
4566 if (dev->class == ATA_DEV_ATA)
4567 ata_flush_cache(ap, dev);
4568
4569 if (state.event != PM_EVENT_FREEZE)
4570 ata_standby_drive(ap, dev);
4571 ap->flags |= ATA_FLAG_SUSPENDED;
4572 return 0;
4573 }
4574
4575 /**
4576 * ata_port_start - Set port up for DMA.
4577 * @ap: Port to initialize
4578 *
4579 * Called just after data structures for each port are
4580 * initialized. Allocates space for PRD table.
4581 *
4582 * May be used as the port_start() entry in ata_port_operations.
4583 *
4584 * LOCKING:
4585 * Inherited from caller.
4586 */
4587
4588 int ata_port_start (struct ata_port *ap)
4589 {
4590 struct device *dev = ap->dev;
4591 int rc;
4592
4593 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4594 if (!ap->prd)
4595 return -ENOMEM;
4596
4597 rc = ata_pad_alloc(ap, dev);
4598 if (rc) {
4599 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4600 return rc;
4601 }
4602
4603 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4604
4605 return 0;
4606 }
4607
4608
4609 /**
4610 * ata_port_stop - Undo ata_port_start()
4611 * @ap: Port to shut down
4612 *
4613 * Frees the PRD table.
4614 *
4615 * May be used as the port_stop() entry in ata_port_operations.
4616 *
4617 * LOCKING:
4618 * Inherited from caller.
4619 */
4620
4621 void ata_port_stop (struct ata_port *ap)
4622 {
4623 struct device *dev = ap->dev;
4624
4625 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4626 ata_pad_free(ap, dev);
4627 }
4628
4629 void ata_host_stop (struct ata_host_set *host_set)
4630 {
4631 if (host_set->mmio_base)
4632 iounmap(host_set->mmio_base);
4633 }
4634
4635
4636 /**
4637 * ata_host_remove - Unregister SCSI host structure with upper layers
4638 * @ap: Port to unregister
4639 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4640 *
4641 * LOCKING:
4642 * Inherited from caller.
4643 */
4644
4645 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4646 {
4647 struct Scsi_Host *sh = ap->host;
4648
4649 DPRINTK("ENTER\n");
4650
4651 if (do_unregister)
4652 scsi_remove_host(sh);
4653
4654 ap->ops->port_stop(ap);
4655 }
4656
4657 /**
4658 * ata_host_init - Initialize an ata_port structure
4659 * @ap: Structure to initialize
4660 * @host: associated SCSI mid-layer structure
4661 * @host_set: Collection of hosts to which @ap belongs
4662 * @ent: Probe information provided by low-level driver
4663 * @port_no: Port number associated with this ata_port
4664 *
4665 * Initialize a new ata_port structure, and its associated
4666 * scsi_host.
4667 *
4668 * LOCKING:
4669 * Inherited from caller.
4670 */
4671
4672 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4673 struct ata_host_set *host_set,
4674 const struct ata_probe_ent *ent, unsigned int port_no)
4675 {
4676 unsigned int i;
4677
4678 host->max_id = 16;
4679 host->max_lun = 1;
4680 host->max_channel = 1;
4681 host->unique_id = ata_unique_id++;
4682 host->max_cmd_len = 12;
4683
4684 ap->flags = ATA_FLAG_PORT_DISABLED;
4685 ap->id = host->unique_id;
4686 ap->host = host;
4687 ap->ctl = ATA_DEVCTL_OBS;
4688 ap->host_set = host_set;
4689 ap->dev = ent->dev;
4690 ap->port_no = port_no;
4691 ap->hard_port_no =
4692 ent->legacy_mode ? ent->hard_port_no : port_no;
4693 ap->pio_mask = ent->pio_mask;
4694 ap->mwdma_mask = ent->mwdma_mask;
4695 ap->udma_mask = ent->udma_mask;
4696 ap->flags |= ent->host_flags;
4697 ap->ops = ent->port_ops;
4698 ap->cbl = ATA_CBL_NONE;
4699 ap->active_tag = ATA_TAG_POISON;
4700 ap->last_ctl = 0xFF;
4701
4702 INIT_WORK(&ap->port_task, NULL, NULL);
4703 INIT_LIST_HEAD(&ap->eh_done_q);
4704
4705 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4706 struct ata_device *dev = &ap->device[i];
4707 dev->devno = i;
4708 dev->pio_mask = UINT_MAX;
4709 dev->mwdma_mask = UINT_MAX;
4710 dev->udma_mask = UINT_MAX;
4711 }
4712
4713 #ifdef ATA_IRQ_TRAP
4714 ap->stats.unhandled_irq = 1;
4715 ap->stats.idle_irq = 1;
4716 #endif
4717
4718 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4719 }
4720
4721 /**
4722 * ata_host_add - Attach low-level ATA driver to system
4723 * @ent: Information provided by low-level driver
4724 * @host_set: Collections of ports to which we add
4725 * @port_no: Port number associated with this host
4726 *
4727 * Attach low-level ATA driver to system.
4728 *
4729 * LOCKING:
4730 * PCI/etc. bus probe sem.
4731 *
4732 * RETURNS:
4733 * New ata_port on success, NULL on error.
4734 */
4735
4736 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4737 struct ata_host_set *host_set,
4738 unsigned int port_no)
4739 {
4740 struct Scsi_Host *host;
4741 struct ata_port *ap;
4742 int rc;
4743
4744 DPRINTK("ENTER\n");
4745
4746 if (!ent->port_ops->probe_reset &&
4747 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4748 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4749 port_no);
4750 return NULL;
4751 }
4752
4753 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4754 if (!host)
4755 return NULL;
4756
4757 host->transportt = &ata_scsi_transport_template;
4758
4759 ap = (struct ata_port *) &host->hostdata[0];
4760
4761 ata_host_init(ap, host, host_set, ent, port_no);
4762
4763 rc = ap->ops->port_start(ap);
4764 if (rc)
4765 goto err_out;
4766
4767 return ap;
4768
4769 err_out:
4770 scsi_host_put(host);
4771 return NULL;
4772 }
4773
4774 /**
4775 * ata_device_add - Register hardware device with ATA and SCSI layers
4776 * @ent: Probe information describing hardware device to be registered
4777 *
4778 * This function processes the information provided in the probe
4779 * information struct @ent, allocates the necessary ATA and SCSI
4780 * host information structures, initializes them, and registers
4781 * everything with requisite kernel subsystems.
4782 *
4783 * This function requests irqs, probes the ATA bus, and probes
4784 * the SCSI bus.
4785 *
4786 * LOCKING:
4787 * PCI/etc. bus probe sem.
4788 *
4789 * RETURNS:
4790 * Number of ports registered. Zero on error (no ports registered).
4791 */
4792
4793 int ata_device_add(const struct ata_probe_ent *ent)
4794 {
4795 unsigned int count = 0, i;
4796 struct device *dev = ent->dev;
4797 struct ata_host_set *host_set;
4798
4799 DPRINTK("ENTER\n");
4800 /* alloc a container for our list of ATA ports (buses) */
4801 host_set = kzalloc(sizeof(struct ata_host_set) +
4802 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4803 if (!host_set)
4804 return 0;
4805 spin_lock_init(&host_set->lock);
4806
4807 host_set->dev = dev;
4808 host_set->n_ports = ent->n_ports;
4809 host_set->irq = ent->irq;
4810 host_set->mmio_base = ent->mmio_base;
4811 host_set->private_data = ent->private_data;
4812 host_set->ops = ent->port_ops;
4813
4814 /* register each port bound to this device */
4815 for (i = 0; i < ent->n_ports; i++) {
4816 struct ata_port *ap;
4817 unsigned long xfer_mode_mask;
4818
4819 ap = ata_host_add(ent, host_set, i);
4820 if (!ap)
4821 goto err_out;
4822
4823 host_set->ports[i] = ap;
4824 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4825 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4826 (ap->pio_mask << ATA_SHIFT_PIO);
4827
4828 /* print per-port info to dmesg */
4829 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4830 "bmdma 0x%lX irq %lu\n",
4831 ap->id,
4832 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4833 ata_mode_string(xfer_mode_mask),
4834 ap->ioaddr.cmd_addr,
4835 ap->ioaddr.ctl_addr,
4836 ap->ioaddr.bmdma_addr,
4837 ent->irq);
4838
4839 ata_chk_status(ap);
4840 host_set->ops->irq_clear(ap);
4841 count++;
4842 }
4843
4844 if (!count)
4845 goto err_free_ret;
4846
4847 /* obtain the irq, which is shared between channels */
4848 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4849 DRV_NAME, host_set))
4850 goto err_out;
4851
4852 /* perform each probe synchronously */
4853 DPRINTK("probe begin\n");
4854 for (i = 0; i < count; i++) {
4855 struct ata_port *ap;
4856 int rc;
4857
4858 ap = host_set->ports[i];
4859
4860 DPRINTK("ata%u: bus probe begin\n", ap->id);
4861 rc = ata_bus_probe(ap);
4862 DPRINTK("ata%u: bus probe end\n", ap->id);
4863
4864 if (rc) {
4865 /* FIXME: do something useful here?
4866 * Current libata behavior will
4867 * tear down everything when
4868 * the module is removed
4869 * or the h/w is unplugged.
4870 */
4871 }
4872
4873 rc = scsi_add_host(ap->host, dev);
4874 if (rc) {
4875 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4876 ap->id);
4877 /* FIXME: do something useful here */
4878 /* FIXME: handle unconditional calls to
4879 * scsi_scan_host and ata_host_remove, below,
4880 * at the very least
4881 */
4882 }
4883 }
4884
4885 /* probes are done, now scan each port's disk(s) */
4886 DPRINTK("host probe begin\n");
4887 for (i = 0; i < count; i++) {
4888 struct ata_port *ap = host_set->ports[i];
4889
4890 ata_scsi_scan_host(ap);
4891 }
4892
4893 dev_set_drvdata(dev, host_set);
4894
4895 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4896 return ent->n_ports; /* success */
4897
4898 err_out:
4899 for (i = 0; i < count; i++) {
4900 ata_host_remove(host_set->ports[i], 1);
4901 scsi_host_put(host_set->ports[i]->host);
4902 }
4903 err_free_ret:
4904 kfree(host_set);
4905 VPRINTK("EXIT, returning 0\n");
4906 return 0;
4907 }
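/*
 * Editorial sketch (not part of the original file): how a low-level
 * driver might describe one port to ata_device_add().  All values and
 * the names example_sht / example_port_ops are placeholders; a real
 * driver would normally build the probe entry from its bus-specific
 * resources (or use ata_pci_init_one() for PCI devices).
 */
static int example_attach(struct device *dev, unsigned long irq,
			  const struct ata_ioports *io)
{
	struct ata_probe_ent ent;

	memset(&ent, 0, sizeof(ent));
	ent.dev		= dev;
	ent.sht		= &example_sht;		/* hypothetical scsi_host_template */
	ent.port_ops	= &example_port_ops;	/* hypothetical ata_port_operations */
	ent.n_ports	= 1;
	ent.irq		= irq;
	ent.irq_flags	= SA_SHIRQ;		/* shared IRQ (pre-IRQF_* era flag) */
	ent.host_flags	= ATA_FLAG_SRST;	/* placeholder flags */
	ent.pio_mask	= 0x1f;			/* PIO0-4, placeholder */
	ent.mwdma_mask	= 0x07;			/* MWDMA0-2, placeholder */
	ent.udma_mask	= 0x3f;			/* UDMA0-5, placeholder */
	ent.port[0]	= *io;

	/* ata_device_add() returns the number of ports registered, zero on error */
	return ata_device_add(&ent) ? 0 : -ENODEV;
}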
4908
4909 /**
4910 * ata_host_set_remove - PCI layer callback for device removal
4911 * @host_set: ATA host set that was removed
4912 *
4913 * Unregister all objects associated with this host set. Free those
4914 * objects.
4915 *
4916 * LOCKING:
4917 * Inherited from calling layer (may sleep).
4918 */
4919
4920 void ata_host_set_remove(struct ata_host_set *host_set)
4921 {
4922 struct ata_port *ap;
4923 unsigned int i;
4924
4925 for (i = 0; i < host_set->n_ports; i++) {
4926 ap = host_set->ports[i];
4927 scsi_remove_host(ap->host);
4928 }
4929
4930 free_irq(host_set->irq, host_set);
4931
4932 for (i = 0; i < host_set->n_ports; i++) {
4933 ap = host_set->ports[i];
4934
4935 ata_scsi_release(ap->host);
4936
4937 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4938 struct ata_ioports *ioaddr = &ap->ioaddr;
4939
4940 if (ioaddr->cmd_addr == 0x1f0)
4941 release_region(0x1f0, 8);
4942 else if (ioaddr->cmd_addr == 0x170)
4943 release_region(0x170, 8);
4944 }
4945
4946 scsi_host_put(ap->host);
4947 }
4948
4949 if (host_set->ops->host_stop)
4950 host_set->ops->host_stop(host_set);
4951
4952 kfree(host_set);
4953 }
4954
4955 /**
4956 * ata_scsi_release - SCSI layer callback hook for host unload
4957 * @host: libata host to be unloaded
4958 *
4959 * Performs all duties necessary to shut down a libata port...
4960 * Kill port kthread, disable port, and release resources.
4961 *
4962 * LOCKING:
4963 * Inherited from SCSI layer.
4964 *
4965 * RETURNS:
4966 * One.
4967 */
4968
4969 int ata_scsi_release(struct Scsi_Host *host)
4970 {
4971 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4972 int i;
4973
4974 DPRINTK("ENTER\n");
4975
4976 ap->ops->port_disable(ap);
4977 ata_host_remove(ap, 0);
4978 for (i = 0; i < ATA_MAX_DEVICES; i++)
4979 kfree(ap->device[i].id);
4980
4981 DPRINTK("EXIT\n");
4982 return 1;
4983 }
4984
4985 /**
4986 * ata_std_ports - initialize ioaddr with standard port offsets.
4987 * @ioaddr: IO address structure to be initialized
4988 *
4989 * Utility function which initializes data_addr, error_addr,
4990 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4991 * device_addr, status_addr, and command_addr to standard offsets
4992 * relative to cmd_addr.
4993 *
4994 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4995 */
4996
4997 void ata_std_ports(struct ata_ioports *ioaddr)
4998 {
4999 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5000 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5001 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5002 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5003 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5004 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5005 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5006 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5007 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5008 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5009 }
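/*
 * Editorial sketch (not part of the original file): typical use of
 * ata_std_ports() when filling in an ata_ioports block.  The command,
 * control and bmdma base addresses are hypothetical I/O port values
 * supplied by the caller.
 */
static void example_fill_ioports(struct ata_ioports *ioaddr,
				 unsigned long cmd_base,
				 unsigned long ctl_base,
				 unsigned long bmdma_base)
{
	ioaddr->cmd_addr	= cmd_base;
	ioaddr->altstatus_addr	= ctl_base;
	ioaddr->ctl_addr	= ctl_base;
	ioaddr->bmdma_addr	= bmdma_base;

	/* derive the remaining taskfile register addresses from cmd_addr */
	ata_std_ports(ioaddr);
}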
5010
5011
5012 #ifdef CONFIG_PCI
5013
5014 void ata_pci_host_stop (struct ata_host_set *host_set)
5015 {
5016 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5017
5018 pci_iounmap(pdev, host_set->mmio_base);
5019 }
5020
5021 /**
5022 * ata_pci_remove_one - PCI layer callback for device removal
5023 * @pdev: PCI device that was removed
5024 *
5025 * PCI layer indicates to libata via this hook that
5026 * hot-unplug or module unload event has occurred.
5027 * Handle this by unregistering all objects associated
5028 * with this PCI device. Free those objects. Then finally
5029 * release PCI resources and disable device.
5030 *
5031 * LOCKING:
5032 * Inherited from PCI layer (may sleep).
5033 */
5034
5035 void ata_pci_remove_one (struct pci_dev *pdev)
5036 {
5037 struct device *dev = pci_dev_to_dev(pdev);
5038 struct ata_host_set *host_set = dev_get_drvdata(dev);
5039
5040 ata_host_set_remove(host_set);
5041 pci_release_regions(pdev);
5042 pci_disable_device(pdev);
5043 dev_set_drvdata(dev, NULL);
5044 }
5045
5046 /* move to PCI subsystem */
5047 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5048 {
5049 unsigned long tmp = 0;
5050
5051 switch (bits->width) {
5052 case 1: {
5053 u8 tmp8 = 0;
5054 pci_read_config_byte(pdev, bits->reg, &tmp8);
5055 tmp = tmp8;
5056 break;
5057 }
5058 case 2: {
5059 u16 tmp16 = 0;
5060 pci_read_config_word(pdev, bits->reg, &tmp16);
5061 tmp = tmp16;
5062 break;
5063 }
5064 case 4: {
5065 u32 tmp32 = 0;
5066 pci_read_config_dword(pdev, bits->reg, &tmp32);
5067 tmp = tmp32;
5068 break;
5069 }
5070
5071 default:
5072 return -EINVAL;
5073 }
5074
5075 tmp &= bits->mask;
5076
5077 return (tmp == bits->val) ? 1 : 0;
5078 }
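/*
 * Editorial sketch (not part of the original file): typical use of
 * pci_test_config_bits().  The register offset, width, mask and value
 * below are hypothetical; a real driver would use the bits documented
 * for its controller (e.g. a per-channel enable bit).
 */
static const struct pci_bits example_enable_bits[] = {
	{ .reg = 0x41, .width = 1, .mask = 0x80, .val = 0x80 }, /* hypothetical: primary */
	{ .reg = 0x43, .width = 1, .mask = 0x80, .val = 0x80 }, /* hypothetical: secondary */
};

static int example_channel_enabled(struct pci_dev *pdev, int channel)
{
	/* pci_test_config_bits() returns 1 on match, 0 on mismatch, <0 on error */
	return pci_test_config_bits(pdev, &example_enable_bits[channel]) == 1;
}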
5079
5080 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5081 {
5082 pci_save_state(pdev);
5083 pci_disable_device(pdev);
5084 pci_set_power_state(pdev, PCI_D3hot);
5085 return 0;
5086 }
5087
5088 int ata_pci_device_resume(struct pci_dev *pdev)
5089 {
5090 pci_set_power_state(pdev, PCI_D0);
5091 pci_restore_state(pdev);
5092 pci_enable_device(pdev);
5093 pci_set_master(pdev);
5094 return 0;
5095 }
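/*
 * Editorial sketch (not part of the original file): wiring the generic
 * PCI helpers above into a driver's pci_driver.  The driver name and the
 * example_pci_tbl / example_init_one symbols are hypothetical.
 */
static struct pci_driver example_pci_driver = {
	.name		= "example_ata",
	.id_table	= example_pci_tbl,	/* hypothetical PCI ID table */
	.probe		= example_init_one,	/* hypothetical probe routine */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};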
5096 #endif /* CONFIG_PCI */
5097
5098
5099 static int __init ata_init(void)
5100 {
5101 ata_wq = create_workqueue("ata");
5102 if (!ata_wq)
5103 return -ENOMEM;
5104
5105 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5106 return 0;
5107 }
5108
5109 static void __exit ata_exit(void)
5110 {
5111 destroy_workqueue(ata_wq);
5112 }
5113
5114 module_init(ata_init);
5115 module_exit(ata_exit);
5116
5117 static unsigned long ratelimit_time;
5118 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5119
5120 int ata_ratelimit(void)
5121 {
5122 int rc;
5123 unsigned long flags;
5124
5125 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5126
5127 if (time_after(jiffies, ratelimit_time)) {
5128 rc = 1;
5129 ratelimit_time = jiffies + (HZ/5);
5130 } else
5131 rc = 0;
5132
5133 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5134
5135 return rc;
5136 }
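/*
 * Editorial sketch (not part of the original file): the intended usage
 * pattern for ata_ratelimit(), throttling a noisy message to roughly
 * five prints per second (the HZ/5 window above).
 */
static void example_rate_limited_warning(struct ata_port *ap)
{
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: hypothetical recoverable event\n",
		       ap->id);
}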
5137
5138 /*
5139 * libata is essentially a library of internal helper functions for
5140 * low-level ATA host controller drivers. As such, the API/ABI is
5141 * likely to change as new drivers are added and updated.
5142 * Do not depend on ABI/API stability.
5143 */
5144
5145 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5146 EXPORT_SYMBOL_GPL(ata_std_ports);
5147 EXPORT_SYMBOL_GPL(ata_device_add);
5148 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5149 EXPORT_SYMBOL_GPL(ata_sg_init);
5150 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5151 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5152 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5153 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5154 EXPORT_SYMBOL_GPL(ata_tf_load);
5155 EXPORT_SYMBOL_GPL(ata_tf_read);
5156 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5157 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5158 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5159 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5160 EXPORT_SYMBOL_GPL(ata_check_status);
5161 EXPORT_SYMBOL_GPL(ata_altstatus);
5162 EXPORT_SYMBOL_GPL(ata_exec_command);
5163 EXPORT_SYMBOL_GPL(ata_port_start);
5164 EXPORT_SYMBOL_GPL(ata_port_stop);
5165 EXPORT_SYMBOL_GPL(ata_host_stop);
5166 EXPORT_SYMBOL_GPL(ata_interrupt);
5167 EXPORT_SYMBOL_GPL(ata_qc_prep);
5168 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5169 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5170 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5171 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5172 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5173 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5174 EXPORT_SYMBOL_GPL(ata_port_probe);
5175 EXPORT_SYMBOL_GPL(sata_phy_reset);
5176 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5177 EXPORT_SYMBOL_GPL(ata_bus_reset);
5178 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5179 EXPORT_SYMBOL_GPL(ata_std_softreset);
5180 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5181 EXPORT_SYMBOL_GPL(ata_std_postreset);
5182 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5183 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5184 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5185 EXPORT_SYMBOL_GPL(ata_dev_classify);
5186 EXPORT_SYMBOL_GPL(ata_dev_pair);
5187 EXPORT_SYMBOL_GPL(ata_port_disable);
5188 EXPORT_SYMBOL_GPL(ata_ratelimit);
5189 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5190 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5191 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5192 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5193 EXPORT_SYMBOL_GPL(ata_scsi_error);
5194 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5195 EXPORT_SYMBOL_GPL(ata_scsi_release);
5196 EXPORT_SYMBOL_GPL(ata_host_intr);
5197 EXPORT_SYMBOL_GPL(ata_id_string);
5198 EXPORT_SYMBOL_GPL(ata_id_c_string);
5199 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5200 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5201 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5202
5203 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5204 EXPORT_SYMBOL_GPL(ata_timing_compute);
5205 EXPORT_SYMBOL_GPL(ata_timing_merge);
5206
5207 #ifdef CONFIG_PCI
5208 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5209 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5210 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5211 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5212 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5213 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5214 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5215 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5216 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5217 #endif /* CONFIG_PCI */
5218
5219 EXPORT_SYMBOL_GPL(ata_device_suspend);
5220 EXPORT_SYMBOL_GPL(ata_device_resume);
5221 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5222 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);