[libata] license change, other bits
drivers/scsi/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <scsi/scsi.h>
52 #include "scsi.h"
53 #include "scsi_priv.h"
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
56 #include <asm/io.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
59
60 #include "libata.h"
61
62 static unsigned int ata_busy_sleep (struct ata_port *ap,
63 unsigned long tmout_pat,
64 unsigned long tmout);
65 static void ata_set_mode(struct ata_port *ap);
66 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
67 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
68 static int fgb(u32 bitmap);
69 static int ata_choose_xfer_mode(struct ata_port *ap,
70 u8 *xfer_mode_out,
71 unsigned int *xfer_shift_out);
72 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
73 static void __ata_qc_complete(struct ata_queued_cmd *qc);
74
75 static unsigned int ata_unique_id = 1;
76 static struct workqueue_struct *ata_wq;
77
78 MODULE_AUTHOR("Jeff Garzik");
79 MODULE_DESCRIPTION("Library module for ATA devices");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_VERSION);
82
83 /**
84  * ata_tf_load_pio - send taskfile registers to host controller
85  * @ap: Port to which output is sent
86  * @tf: ATA taskfile register set
87  *
88  * Outputs ATA taskfile to standard ATA host controller using PIO.
89 *
90 * LOCKING:
91 * Inherited from caller.
92 */
93
94 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
95 {
96 struct ata_ioports *ioaddr = &ap->ioaddr;
97 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
98
99 if (tf->ctl != ap->last_ctl) {
100 outb(tf->ctl, ioaddr->ctl_addr);
101 ap->last_ctl = tf->ctl;
102 ata_wait_idle(ap);
103 }
104
105 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
106 outb(tf->hob_feature, ioaddr->feature_addr);
107 outb(tf->hob_nsect, ioaddr->nsect_addr);
108 outb(tf->hob_lbal, ioaddr->lbal_addr);
109 outb(tf->hob_lbam, ioaddr->lbam_addr);
110 outb(tf->hob_lbah, ioaddr->lbah_addr);
111 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
112 tf->hob_feature,
113 tf->hob_nsect,
114 tf->hob_lbal,
115 tf->hob_lbam,
116 tf->hob_lbah);
117 }
118
119 if (is_addr) {
120 outb(tf->feature, ioaddr->feature_addr);
121 outb(tf->nsect, ioaddr->nsect_addr);
122 outb(tf->lbal, ioaddr->lbal_addr);
123 outb(tf->lbam, ioaddr->lbam_addr);
124 outb(tf->lbah, ioaddr->lbah_addr);
125 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
126 tf->feature,
127 tf->nsect,
128 tf->lbal,
129 tf->lbam,
130 tf->lbah);
131 }
132
133 if (tf->flags & ATA_TFLAG_DEVICE) {
134 outb(tf->device, ioaddr->device_addr);
135 VPRINTK("device 0x%X\n", tf->device);
136 }
137
138 ata_wait_idle(ap);
139 }
140
141 /**
142 * ata_tf_load_mmio - send taskfile registers to host controller
143 * @ap: Port to which output is sent
144 * @tf: ATA taskfile register set
145 *
146 * Outputs ATA taskfile to standard ATA host controller using MMIO.
147 *
148 * LOCKING:
149 * Inherited from caller.
150 */
151
152 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
153 {
154 struct ata_ioports *ioaddr = &ap->ioaddr;
155 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
156
157 if (tf->ctl != ap->last_ctl) {
158 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
159 ap->last_ctl = tf->ctl;
160 ata_wait_idle(ap);
161 }
162
163 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
164 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
165 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
166 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
167 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
168 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
169 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
170 tf->hob_feature,
171 tf->hob_nsect,
172 tf->hob_lbal,
173 tf->hob_lbam,
174 tf->hob_lbah);
175 }
176
177 if (is_addr) {
178 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
179 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
180 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
181 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
182 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
183 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
184 tf->feature,
185 tf->nsect,
186 tf->lbal,
187 tf->lbam,
188 tf->lbah);
189 }
190
191 if (tf->flags & ATA_TFLAG_DEVICE) {
192 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
193 VPRINTK("device 0x%X\n", tf->device);
194 }
195
196 ata_wait_idle(ap);
197 }
198
199
200 /**
201 * ata_tf_load - send taskfile registers to host controller
202 * @ap: Port to which output is sent
203 * @tf: ATA taskfile register set
204 *
205 * Outputs ATA taskfile to standard ATA host controller using MMIO
206 * or PIO as indicated by the ATA_FLAG_MMIO flag.
207 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
208 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
209 * hob_lbal, hob_lbam, and hob_lbah.
210 *
211 * This function waits for idle (!BUSY and !DRQ) after writing
212 * registers. If the control register has a new value, this
213 * function also waits for idle after writing control and before
214 * writing the remaining registers.
215 *
216 * May be used as the tf_load() entry in ata_port_operations.
217 *
218 * LOCKING:
219 * Inherited from caller.
220 */
221 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
222 {
223 if (ap->flags & ATA_FLAG_MMIO)
224 ata_tf_load_mmio(ap, tf);
225 else
226 ata_tf_load_pio(ap, tf);
227 }
228
229 /**
230 * ata_exec_command_pio - issue ATA command to host controller
231 * @ap: port to which command is being issued
232 * @tf: ATA taskfile register set
233 *
234 * Issues PIO write to ATA command register, with proper
235 * synchronization with interrupt handler / other threads.
236 *
237 * LOCKING:
238 * spin_lock_irqsave(host_set lock)
239 */
240
241 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
242 {
243 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
244
245 outb(tf->command, ap->ioaddr.command_addr);
246 ata_pause(ap);
247 }
248
249
250 /**
251 * ata_exec_command_mmio - issue ATA command to host controller
252 * @ap: port to which command is being issued
253 * @tf: ATA taskfile register set
254 *
255 * Issues MMIO write to ATA command register, with proper
256 * synchronization with interrupt handler / other threads.
257 *
258 * LOCKING:
259 * spin_lock_irqsave(host_set lock)
260 */
261
262 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
263 {
264 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
265
266 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
267 ata_pause(ap);
268 }
269
270
271 /**
272 * ata_exec_command - issue ATA command to host controller
273 * @ap: port to which command is being issued
274 * @tf: ATA taskfile register set
275 *
276 * Issues PIO/MMIO write to ATA command register, with proper
277 * synchronization with interrupt handler / other threads.
278 *
279 * LOCKING:
280 * spin_lock_irqsave(host_set lock)
281 */
282 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
283 {
284 if (ap->flags & ATA_FLAG_MMIO)
285 ata_exec_command_mmio(ap, tf);
286 else
287 ata_exec_command_pio(ap, tf);
288 }
289
290 /**
291 * ata_exec - issue ATA command to host controller
292 * @ap: port to which command is being issued
293 * @tf: ATA taskfile register set
294 *
295 * Issues PIO/MMIO write to ATA command register, with proper
296 * synchronization with interrupt handler / other threads.
297 *
298 * LOCKING:
299 * Obtains host_set lock.
300 */
301
302 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
303 {
304 unsigned long flags;
305
306 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
307 spin_lock_irqsave(&ap->host_set->lock, flags);
308 ap->ops->exec_command(ap, tf);
309 spin_unlock_irqrestore(&ap->host_set->lock, flags);
310 }
311
312 /**
313 * ata_tf_to_host - issue ATA taskfile to host controller
314 * @ap: port to which command is being issued
315 * @tf: ATA taskfile register set
316 *
317 * Issues ATA taskfile register set to ATA host controller,
318 * with proper synchronization with interrupt handler and
319 * other threads.
320 *
321 * LOCKING:
322 * Obtains host_set lock.
323 */
324
325 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
326 {
327 ap->ops->tf_load(ap, tf);
328
329 ata_exec(ap, tf);
330 }
331
332 /**
333 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
334 * @ap: port to which command is being issued
335 * @tf: ATA taskfile register set
336 *
337 * Issues ATA taskfile register set to ATA host controller,
338 * with proper synchronization with interrupt handler and
339 * other threads.
340 *
341 * LOCKING:
342 * spin_lock_irqsave(host_set lock)
343 */
344
345 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
346 {
347 ap->ops->tf_load(ap, tf);
348 ap->ops->exec_command(ap, tf);
349 }
350
351 /**
352 * ata_tf_read_pio - input device's ATA taskfile shadow registers
353 * @ap: Port from which input is read
354 * @tf: ATA taskfile register set for storing input
355 *
356 * Reads ATA taskfile registers for currently-selected device
357 * into @tf.
358 *
359 * LOCKING:
360 * Inherited from caller.
361 */
362
363 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
364 {
365 struct ata_ioports *ioaddr = &ap->ioaddr;
366
367 tf->nsect = inb(ioaddr->nsect_addr);
368 tf->lbal = inb(ioaddr->lbal_addr);
369 tf->lbam = inb(ioaddr->lbam_addr);
370 tf->lbah = inb(ioaddr->lbah_addr);
371 tf->device = inb(ioaddr->device_addr);
372
373 if (tf->flags & ATA_TFLAG_LBA48) {
374 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
375 tf->hob_feature = inb(ioaddr->error_addr);
376 tf->hob_nsect = inb(ioaddr->nsect_addr);
377 tf->hob_lbal = inb(ioaddr->lbal_addr);
378 tf->hob_lbam = inb(ioaddr->lbam_addr);
379 tf->hob_lbah = inb(ioaddr->lbah_addr);
380 }
381 }
382
383 /**
384 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
385 * @ap: Port from which input is read
386 * @tf: ATA taskfile register set for storing input
387 *
388 * Reads ATA taskfile registers for currently-selected device
389 * into @tf via MMIO.
390 *
391 * LOCKING:
392 * Inherited from caller.
393 */
394
395 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
396 {
397 struct ata_ioports *ioaddr = &ap->ioaddr;
398
399 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
400 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
401 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
402 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
403 tf->device = readb((void __iomem *)ioaddr->device_addr);
404
405 if (tf->flags & ATA_TFLAG_LBA48) {
406 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
407 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
408 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
409 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
410 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
411 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
412 }
413 }
414
415
416 /**
417 * ata_tf_read - input device's ATA taskfile shadow registers
418 * @ap: Port from which input is read
419 * @tf: ATA taskfile register set for storing input
420 *
421 * Reads ATA taskfile registers for currently-selected device
422 * into @tf.
423 *
424 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
425 * is set, also reads the hob registers.
426 *
427 * May be used as the tf_read() entry in ata_port_operations.
428 *
429 * LOCKING:
430 * Inherited from caller.
431 */
432 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
433 {
434 if (ap->flags & ATA_FLAG_MMIO)
435 ata_tf_read_mmio(ap, tf);
436 else
437 ata_tf_read_pio(ap, tf);
438 }
439
440 /**
441 * ata_check_status_pio - Read device status reg & clear interrupt
442 * @ap: port where the device is
443 *
444 * Reads ATA taskfile status register for currently-selected device
445  * and returns its value. This also clears pending interrupts
446  * from this device.
447 *
448 * LOCKING:
449 * Inherited from caller.
450 */
451 static u8 ata_check_status_pio(struct ata_port *ap)
452 {
453 return inb(ap->ioaddr.status_addr);
454 }
455
456 /**
457 * ata_check_status_mmio - Read device status reg & clear interrupt
458 * @ap: port where the device is
459 *
460 * Reads ATA taskfile status register for currently-selected device
461  * via MMIO and returns its value. This also clears pending interrupts
462  * from this device.
463 *
464 * LOCKING:
465 * Inherited from caller.
466 */
467 static u8 ata_check_status_mmio(struct ata_port *ap)
468 {
469 return readb((void __iomem *) ap->ioaddr.status_addr);
470 }
471
472
473 /**
474 * ata_check_status - Read device status reg & clear interrupt
475 * @ap: port where the device is
476 *
477 * Reads ATA taskfile status register for currently-selected device
478  * and returns its value. This also clears pending interrupts
479  * from this device.
480 *
481 * May be used as the check_status() entry in ata_port_operations.
482 *
483 * LOCKING:
484 * Inherited from caller.
485 */
486 u8 ata_check_status(struct ata_port *ap)
487 {
488 if (ap->flags & ATA_FLAG_MMIO)
489 return ata_check_status_mmio(ap);
490 return ata_check_status_pio(ap);
491 }
492
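/*
 * Illustrative sketch, not part of this file: a low-level driver for a
 * plain taskfile controller can plug the helpers above directly into
 * its ata_port_operations, as their kernel-doc notes ("May be used as
 * the ... entry"). The structure name "example_port_ops" is
 * hypothetical, and most entries are omitted.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.exec_command	= ata_exec_command,
	.check_status	= ata_check_status,
	.dev_select	= ata_std_dev_select,
	/* ... phy_reset, bmdma and qc entries omitted ... */
};
#endif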
493
494 /**
495 * ata_altstatus - Read device alternate status reg
496 * @ap: port where the device is
497 *
498 * Reads ATA taskfile alternate status register for
499  * currently-selected device and returns its value.
500 *
501 * Note: may NOT be used as the check_altstatus() entry in
502 * ata_port_operations.
503 *
504 * LOCKING:
505 * Inherited from caller.
506 */
507 u8 ata_altstatus(struct ata_port *ap)
508 {
509 if (ap->ops->check_altstatus)
510 return ap->ops->check_altstatus(ap);
511
512 if (ap->flags & ATA_FLAG_MMIO)
513 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
514 return inb(ap->ioaddr.altstatus_addr);
515 }
516
517
518 /**
519 * ata_chk_err - Read device error reg
520 * @ap: port where the device is
521 *
522 * Reads ATA taskfile error register for
523  * currently-selected device and returns its value.
524 *
525 * Note: may NOT be used as the check_err() entry in
526 * ata_port_operations.
527 *
528 * LOCKING:
529 * Inherited from caller.
530 */
531 u8 ata_chk_err(struct ata_port *ap)
532 {
533 if (ap->ops->check_err)
534 return ap->ops->check_err(ap);
535
536 if (ap->flags & ATA_FLAG_MMIO) {
537 return readb((void __iomem *) ap->ioaddr.error_addr);
538 }
539 return inb(ap->ioaddr.error_addr);
540 }
541
542 /**
543 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
544 * @tf: Taskfile to convert
545  * @fis: Buffer into which data will be output
546 * @pmp: Port multiplier port
547 *
548 * Converts a standard ATA taskfile to a Serial ATA
549 * FIS structure (Register - Host to Device).
550 *
551 * LOCKING:
552 * Inherited from caller.
553 */
554
555 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
556 {
557 fis[0] = 0x27; /* Register - Host to Device FIS */
558 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
559 bit 7 indicates Command FIS */
560 fis[2] = tf->command;
561 fis[3] = tf->feature;
562
563 fis[4] = tf->lbal;
564 fis[5] = tf->lbam;
565 fis[6] = tf->lbah;
566 fis[7] = tf->device;
567
568 fis[8] = tf->hob_lbal;
569 fis[9] = tf->hob_lbam;
570 fis[10] = tf->hob_lbah;
571 fis[11] = tf->hob_feature;
572
573 fis[12] = tf->nsect;
574 fis[13] = tf->hob_nsect;
575 fis[14] = 0;
576 fis[15] = tf->ctl;
577
578 fis[16] = 0;
579 fis[17] = 0;
580 fis[18] = 0;
581 fis[19] = 0;
582 }
583
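/*
 * Illustrative sketch, not part of this file: a SATA controller driver
 * typically calls ata_tf_to_fis() when building the command FIS for a
 * queued command. On return, fis[0] is 0x27 (Register - Host to Device)
 * and bit 7 of fis[1] is set, marking it a command FIS. The function
 * and buffer names here are hypothetical.
 */
#if 0
static void example_fill_cmd_slot(struct ata_queued_cmd *qc, u8 *cmd_fis)
{
	/* pmp is zero unless a port multiplier port is addressed */
	ata_tf_to_fis(&qc->tf, cmd_fis, 0);
}
#endif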
584 /**
585 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
586 * @fis: Buffer from which data will be input
587 * @tf: Taskfile to output
588 *
589  * Converts a serial ATA FIS structure (Register - Device to
590  * Host) to a standard ATA taskfile.
591 *
592 * LOCKING:
593 * Inherited from caller.
594 */
595
596 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
597 {
598 tf->command = fis[2]; /* status */
599 tf->feature = fis[3]; /* error */
600
601 tf->lbal = fis[4];
602 tf->lbam = fis[5];
603 tf->lbah = fis[6];
604 tf->device = fis[7];
605
606 tf->hob_lbal = fis[8];
607 tf->hob_lbam = fis[9];
608 tf->hob_lbah = fis[10];
609
610 tf->nsect = fis[12];
611 tf->hob_nsect = fis[13];
612 }
613
614 /**
615 * ata_prot_to_cmd - determine which read/write opcodes to use
616 * @protocol: ATA_PROT_xxx taskfile protocol
617  * @lba48: true if lba48 is present
618 *
619 * Given necessary input, determine which read/write commands
620 * to use to transfer data.
621 *
622 * LOCKING:
623 * None.
624 */
625 static int ata_prot_to_cmd(int protocol, int lba48)
626 {
627 int rcmd = 0, wcmd = 0;
628
629 switch (protocol) {
630 case ATA_PROT_PIO:
631 if (lba48) {
632 rcmd = ATA_CMD_PIO_READ_EXT;
633 wcmd = ATA_CMD_PIO_WRITE_EXT;
634 } else {
635 rcmd = ATA_CMD_PIO_READ;
636 wcmd = ATA_CMD_PIO_WRITE;
637 }
638 break;
639
640 case ATA_PROT_DMA:
641 if (lba48) {
642 rcmd = ATA_CMD_READ_EXT;
643 wcmd = ATA_CMD_WRITE_EXT;
644 } else {
645 rcmd = ATA_CMD_READ;
646 wcmd = ATA_CMD_WRITE;
647 }
648 break;
649
650 default:
651 return -1;
652 }
653
654 return rcmd | (wcmd << 8);
655 }
656
657 /**
658 * ata_dev_set_protocol - set taskfile protocol and r/w commands
659 * @dev: device to examine and configure
660 *
661 * Examine the device configuration, after we have
662 * read the identify-device page and configured the
663 * data transfer mode. Set internal state related to
664 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
665 * and calculate the proper read/write commands to use.
666 *
667 * LOCKING:
668 * caller.
669 */
670 static void ata_dev_set_protocol(struct ata_device *dev)
671 {
672 int pio = (dev->flags & ATA_DFLAG_PIO);
673 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
674 int proto, cmd;
675
676 if (pio)
677 proto = dev->xfer_protocol = ATA_PROT_PIO;
678 else
679 proto = dev->xfer_protocol = ATA_PROT_DMA;
680
681 cmd = ata_prot_to_cmd(proto, lba48);
682 if (cmd < 0)
683 BUG();
684
685 dev->read_cmd = cmd & 0xff;
686 dev->write_cmd = (cmd >> 8) & 0xff;
687 }
688
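/*
 * Worked example: for ATA_PROT_DMA without LBA48, ata_prot_to_cmd()
 * returns ATA_CMD_READ | (ATA_CMD_WRITE << 8), so the unpacking above
 * leaves dev->read_cmd == ATA_CMD_READ and dev->write_cmd ==
 * ATA_CMD_WRITE.
 */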
689 static const char * xfer_mode_str[] = {
690 "UDMA/16",
691 "UDMA/25",
692 "UDMA/33",
693 "UDMA/44",
694 "UDMA/66",
695 "UDMA/100",
696 "UDMA/133",
697 "UDMA7",
698 "MWDMA0",
699 "MWDMA1",
700 "MWDMA2",
701 "PIO0",
702 "PIO1",
703 "PIO2",
704 "PIO3",
705 "PIO4",
706 };
707
708 /**
709  * ata_mode_string - convert xfer mode bit offset to string
710  * @mask: mask of bits supported; only highest bit counts.
711  *
712  * Determine string which represents the highest speed
713  * (highest bit in @mask).
714  *
715  * LOCKING:
716  * None.
717  *
718  * RETURNS:
719  * Constant C string representing highest speed listed in
720  * @mask, or the constant C string "<n/a>".
721 */
722
723 static const char *ata_mode_string(unsigned int mask)
724 {
725 int i;
726
727 for (i = 7; i >= 0; i--)
728 if (mask & (1 << i))
729 goto out;
730 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
731 if (mask & (1 << i))
732 goto out;
733 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
734 if (mask & (1 << i))
735 goto out;
736
737 return "<n/a>";
738
739 out:
740 return xfer_mode_str[i];
741 }
742
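/*
 * Worked example: a mask of 0x3f (UDMA modes 0-5 supported) makes the
 * first loop above stop at bit 5, yielding xfer_mode_str[5], i.e.
 * "UDMA/100". A PIO-only mask such as (0x1f << ATA_SHIFT_PIO) falls
 * through to the PIO loop and yields "PIO4".
 */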
743 /**
744 * ata_pio_devchk - PATA device presence detection
745 * @ap: ATA channel to examine
746 * @device: Device to examine (starting at zero)
747 *
748 * This technique was originally described in
749 * Hale Landis's ATADRVR (www.ata-atapi.com), and
750 * later found its way into the ATA/ATAPI spec.
751 *
752 * Write a pattern to the ATA shadow registers,
753 * and if a device is present, it will respond by
754 * correctly storing and echoing back the
755 * ATA shadow register contents.
756 *
757 * LOCKING:
758 * caller.
759 */
760
761 static unsigned int ata_pio_devchk(struct ata_port *ap,
762 unsigned int device)
763 {
764 struct ata_ioports *ioaddr = &ap->ioaddr;
765 u8 nsect, lbal;
766
767 ap->ops->dev_select(ap, device);
768
769 outb(0x55, ioaddr->nsect_addr);
770 outb(0xaa, ioaddr->lbal_addr);
771
772 outb(0xaa, ioaddr->nsect_addr);
773 outb(0x55, ioaddr->lbal_addr);
774
775 outb(0x55, ioaddr->nsect_addr);
776 outb(0xaa, ioaddr->lbal_addr);
777
778 nsect = inb(ioaddr->nsect_addr);
779 lbal = inb(ioaddr->lbal_addr);
780
781 if ((nsect == 0x55) && (lbal == 0xaa))
782 return 1; /* we found a device */
783
784 return 0; /* nothing found */
785 }
786
787 /**
788 * ata_mmio_devchk - PATA device presence detection
789 * @ap: ATA channel to examine
790 * @device: Device to examine (starting at zero)
791 *
792 * This technique was originally described in
793 * Hale Landis's ATADRVR (www.ata-atapi.com), and
794 * later found its way into the ATA/ATAPI spec.
795 *
796 * Write a pattern to the ATA shadow registers,
797 * and if a device is present, it will respond by
798 * correctly storing and echoing back the
799 * ATA shadow register contents.
800 *
801 * LOCKING:
802 * caller.
803 */
804
805 static unsigned int ata_mmio_devchk(struct ata_port *ap,
806 unsigned int device)
807 {
808 struct ata_ioports *ioaddr = &ap->ioaddr;
809 u8 nsect, lbal;
810
811 ap->ops->dev_select(ap, device);
812
813 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
814 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
815
816 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
817 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
818
819 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
820 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
821
822 nsect = readb((void __iomem *) ioaddr->nsect_addr);
823 lbal = readb((void __iomem *) ioaddr->lbal_addr);
824
825 if ((nsect == 0x55) && (lbal == 0xaa))
826 return 1; /* we found a device */
827
828 return 0; /* nothing found */
829 }
830
831 /**
832 * ata_devchk - PATA device presence detection
833 * @ap: ATA channel to examine
834 * @device: Device to examine (starting at zero)
835 *
836 * Dispatch ATA device presence detection, depending
837 * on whether we are using PIO or MMIO to talk to the
838 * ATA shadow registers.
839 *
840 * LOCKING:
841 * caller.
842 */
843
844 static unsigned int ata_devchk(struct ata_port *ap,
845 unsigned int device)
846 {
847 if (ap->flags & ATA_FLAG_MMIO)
848 return ata_mmio_devchk(ap, device);
849 return ata_pio_devchk(ap, device);
850 }
851
852 /**
853 * ata_dev_classify - determine device type based on ATA-spec signature
854 * @tf: ATA taskfile register set for device to be identified
855 *
856 * Determine from taskfile register contents whether a device is
857 * ATA or ATAPI, as per "Signature and persistence" section
858 * of ATA/PI spec (volume 1, sect 5.14).
859 *
860 * LOCKING:
861 * None.
862 *
863 * RETURNS:
864  * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
865  * in the event of failure.
866 */
867
868 unsigned int ata_dev_classify(struct ata_taskfile *tf)
869 {
870 /* Apple's open source Darwin code hints that some devices only
871  * put a proper signature into the LBA mid/high registers,
872  * so we only check those. It's sufficient for uniqueness.
873 */
874
875 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
876 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
877 DPRINTK("found ATA device by sig\n");
878 return ATA_DEV_ATA;
879 }
880
881 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
882 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
883 DPRINTK("found ATAPI device by sig\n");
884 return ATA_DEV_ATAPI;
885 }
886
887 DPRINTK("unknown device\n");
888 return ATA_DEV_UNKNOWN;
889 }
890
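/*
 * Illustrative sketch, not part of this file: classifying a device from
 * the signature left in the shadow registers after a reset. The helper
 * name is hypothetical; the calls are the real ones above.
 */
#if 0
static unsigned int example_classify(struct ata_port *ap)
{
	struct ata_taskfile tf;

	ap->ops->tf_read(ap, &tf);

	/* e.g. lbam/lbah of 0x14/0xeb here means ATA_DEV_ATAPI */
	return ata_dev_classify(&tf);
}
#endif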
891 /**
892 * ata_dev_try_classify - Parse returned ATA device signature
893 * @ap: ATA channel to examine
894 * @device: Device to examine (starting at zero)
895 *
896 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
897 * an ATA/ATAPI-defined set of values is placed in the ATA
898 * shadow registers, indicating the results of device detection
899 * and diagnostics.
900 *
901 * Select the ATA device, and read the values from the ATA shadow
902 * registers. Then parse according to the Error register value,
903 * and the spec-defined values examined by ata_dev_classify().
904 *
905 * LOCKING:
906 * caller.
907 */
908
909 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
910 {
911 struct ata_device *dev = &ap->device[device];
912 struct ata_taskfile tf;
913 unsigned int class;
914 u8 err;
915
916 ap->ops->dev_select(ap, device);
917
918 memset(&tf, 0, sizeof(tf));
919
920 err = ata_chk_err(ap);
921 ap->ops->tf_read(ap, &tf);
922
923 dev->class = ATA_DEV_NONE;
924
925 /* see if device passed diags */
926 if (err == 1)
927 /* do nothing */ ;
928 else if ((device == 0) && (err == 0x81))
929 /* do nothing */ ;
930 else
931 return err;
932
933  /* determine if device is ATA or ATAPI */
934 class = ata_dev_classify(&tf);
935 if (class == ATA_DEV_UNKNOWN)
936 return err;
937 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
938 return err;
939
940 dev->class = class;
941
942 return err;
943 }
944
945 /**
946 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
947 * @id: IDENTIFY DEVICE results we will examine
948 * @s: string into which data is output
949 * @ofs: offset into identify device page
950  * @len: length of string to return. Must be an even number.
951 *
952 * The strings in the IDENTIFY DEVICE page are broken up into
953 * 16-bit chunks. Run through the string, and output each
954 * 8-bit chunk linearly, regardless of platform.
955 *
956 * LOCKING:
957 * caller.
958 */
959
960 void ata_dev_id_string(u16 *id, unsigned char *s,
961 unsigned int ofs, unsigned int len)
962 {
963 unsigned int c;
964
965 while (len > 0) {
966 c = id[ofs] >> 8;
967 *s = c;
968 s++;
969
970 c = id[ofs] & 0xff;
971 *s = c;
972 s++;
973
974 ofs++;
975 len -= 2;
976 }
977 }
978
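/*
 * Illustrative sketch, not part of this file: extracting the model
 * string from an IDENTIFY DEVICE page and trimming the blank padding
 * the spec mandates; compare the real call site in ata_dma_blacklisted()
 * below. Names and buffer sizes here are hypothetical.
 */
#if 0
static void example_print_model(struct ata_device *dev)
{
	unsigned char model[41];
	unsigned int len = 40;

	ata_dev_id_string(dev->id, model, ATA_ID_PROD_OFS, 40);
	model[40] = '\0';

	/* strings are blank-padded to full width; strip the padding */
	while (len && model[len - 1] == ' ')
		model[--len] = '\0';

	printk(KERN_DEBUG "model: %s\n", model);
}
#endif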
979
980 /**
981 * ata_noop_dev_select - Select device 0/1 on ATA bus
982 * @ap: ATA channel to manipulate
983 * @device: ATA device (numbered from zero) to select
984 *
985  * This function performs no action; some controllers need no device selection.
986 *
987 * May be used as the dev_select() entry in ata_port_operations.
988 *
989 * LOCKING:
990 * caller.
991 */
992 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
993 {
994 }
995
996
997 /**
998 * ata_std_dev_select - Select device 0/1 on ATA bus
999 * @ap: ATA channel to manipulate
1000 * @device: ATA device (numbered from zero) to select
1001 *
1002 * Use the method defined in the ATA specification to
1003 * make either device 0, or device 1, active on the
1004 * ATA channel. Works with both PIO and MMIO.
1005 *
1006 * May be used as the dev_select() entry in ata_port_operations.
1007 *
1008 * LOCKING:
1009 * caller.
1010 */
1011
1012 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1013 {
1014 u8 tmp;
1015
1016 if (device == 0)
1017 tmp = ATA_DEVICE_OBS;
1018 else
1019 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1020
1021 if (ap->flags & ATA_FLAG_MMIO) {
1022 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
1023 } else {
1024 outb(tmp, ap->ioaddr.device_addr);
1025 }
1026 ata_pause(ap); /* needed; also flushes, for mmio */
1027 }
1028
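/*
 * For reference: ATA_DEVICE_OBS is the pair of obsolete, always-set
 * bits in the device register (historically 0xA0), and ATA_DEV1 is the
 * DEV bit, so selecting device 1 writes 0xB0.
 */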
1029 /**
1030 * ata_dev_select - Select device 0/1 on ATA bus
1031 * @ap: ATA channel to manipulate
1032 * @device: ATA device (numbered from zero) to select
1033 * @wait: non-zero to wait for Status register BSY bit to clear
1034 * @can_sleep: non-zero if context allows sleeping
1035 *
1036 * Use the method defined in the ATA specification to
1037 * make either device 0, or device 1, active on the
1038 * ATA channel.
1039 *
1040 * This is a high-level version of ata_std_dev_select(),
1041 * which additionally provides the services of inserting
1042 * the proper pauses and status polling, where needed.
1043 *
1044 * LOCKING:
1045 * caller.
1046 */
1047
1048 void ata_dev_select(struct ata_port *ap, unsigned int device,
1049 unsigned int wait, unsigned int can_sleep)
1050 {
1051 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
1052 ap->id, device, wait);
1053
1054 if (wait)
1055 ata_wait_idle(ap);
1056
1057 ap->ops->dev_select(ap, device);
1058
1059 if (wait) {
1060 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1061 msleep(150);
1062 ata_wait_idle(ap);
1063 }
1064 }
1065
1066 /**
1067 * ata_dump_id - IDENTIFY DEVICE info debugging output
1068 * @dev: Device whose IDENTIFY DEVICE page we will dump
1069 *
1070 * Dump selected 16-bit words from a detected device's
1071  * IDENTIFY DEVICE page.
1072 *
1073 * LOCKING:
1074 * caller.
1075 */
1076
1077 static inline void ata_dump_id(struct ata_device *dev)
1078 {
1079 DPRINTK("49==0x%04x "
1080 "53==0x%04x "
1081 "63==0x%04x "
1082 "64==0x%04x "
1083 "75==0x%04x \n",
1084 dev->id[49],
1085 dev->id[53],
1086 dev->id[63],
1087 dev->id[64],
1088 dev->id[75]);
1089 DPRINTK("80==0x%04x "
1090 "81==0x%04x "
1091 "82==0x%04x "
1092 "83==0x%04x "
1093 "84==0x%04x \n",
1094 dev->id[80],
1095 dev->id[81],
1096 dev->id[82],
1097 dev->id[83],
1098 dev->id[84]);
1099 DPRINTK("88==0x%04x "
1100 "93==0x%04x\n",
1101 dev->id[88],
1102 dev->id[93]);
1103 }
1104
1105 /**
1106  * ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
1107 * @ap: port on which device we wish to probe resides
1108 * @device: device bus address, starting at zero
1109 *
1110 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1111 * command, and read back the 512-byte device information page.
1112 * The device information page is fed to us via the standard
1113 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1114 * using standard PIO-IN paths)
1115 *
1116 * After reading the device information page, we use several
1117 * bits of information from it to initialize data structures
1118 * that will be used during the lifetime of the ata_device.
1119 * Other data from the info page is used to disqualify certain
1120 * older ATA devices we do not wish to support.
1121 *
1122 * LOCKING:
1123 * Inherited from caller. Some functions called by this function
1124 * obtain the host_set lock.
1125 */
1126
1127 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1128 {
1129 struct ata_device *dev = &ap->device[device];
1130 unsigned int i;
1131 u16 tmp;
1132 unsigned long xfer_modes;
1133 u8 status;
1134 unsigned int using_edd;
1135 DECLARE_COMPLETION(wait);
1136 struct ata_queued_cmd *qc;
1137 unsigned long flags;
1138 int rc;
1139
1140 if (!ata_dev_present(dev)) {
1141 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1142 ap->id, device);
1143 return;
1144 }
1145
1146 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1147 using_edd = 0;
1148 else
1149 using_edd = 1;
1150
1151 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1152
1153 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1154 dev->class == ATA_DEV_NONE);
1155
1156 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
1157
1158 qc = ata_qc_new_init(ap, dev);
1159 BUG_ON(qc == NULL);
1160
1161 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1162 qc->dma_dir = DMA_FROM_DEVICE;
1163 qc->tf.protocol = ATA_PROT_PIO;
1164 qc->nsect = 1;
1165
1166 retry:
1167 if (dev->class == ATA_DEV_ATA) {
1168 qc->tf.command = ATA_CMD_ID_ATA;
1169 DPRINTK("do ATA identify\n");
1170 } else {
1171 qc->tf.command = ATA_CMD_ID_ATAPI;
1172 DPRINTK("do ATAPI identify\n");
1173 }
1174
1175 qc->waiting = &wait;
1176 qc->complete_fn = ata_qc_complete_noop;
1177
1178 spin_lock_irqsave(&ap->host_set->lock, flags);
1179 rc = ata_qc_issue(qc);
1180 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1181
1182 if (rc)
1183 goto err_out;
1184 else
1185 wait_for_completion(&wait);
1186
1187 status = ata_chk_status(ap);
1188 if (status & ATA_ERR) {
1189 /*
1190 * arg! EDD works for all test cases, but seems to return
1191 * the ATA signature for some ATAPI devices. Until the
1192 * reason for this is found and fixed, we fix up the mess
1193 * here. If IDENTIFY DEVICE returns command aborted
1194 * (as ATAPI devices do), then we issue an
1195 * IDENTIFY PACKET DEVICE.
1196 *
1197 * ATA software reset (SRST, the default) does not appear
1198 * to have this problem.
1199 */
1200 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1201 u8 err = ata_chk_err(ap);
1202 if (err & ATA_ABORTED) {
1203 dev->class = ATA_DEV_ATAPI;
1204 qc->cursg = 0;
1205 qc->cursg_ofs = 0;
1206 qc->cursect = 0;
1207 qc->nsect = 1;
1208 goto retry;
1209 }
1210 }
1211 goto err_out;
1212 }
1213
1214 swap_buf_le16(dev->id, ATA_ID_WORDS);
1215
1216 /* print device capabilities */
1217 printk(KERN_DEBUG "ata%u: dev %u cfg "
1218 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1219 ap->id, device, dev->id[49],
1220 dev->id[82], dev->id[83], dev->id[84],
1221 dev->id[85], dev->id[86], dev->id[87],
1222 dev->id[88]);
1223
1224 /*
1225 * common ATA, ATAPI feature tests
1226 */
1227
1228 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1229 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
1230 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
1231 goto err_out_nosup;
1232 }
1233
1234 /* quick-n-dirty find max transfer mode; for printk only */
1235 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1236 if (!xfer_modes)
1237 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1238 if (!xfer_modes) {
1239 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1240 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1241 }
1242
1243 ata_dump_id(dev);
1244
1245 /* ATA-specific feature tests */
1246 if (dev->class == ATA_DEV_ATA) {
1247 if (!ata_id_is_ata(dev->id)) /* sanity check */
1248 goto err_out_nosup;
1249
1250 tmp = dev->id[ATA_ID_MAJOR_VER];
1251 for (i = 14; i >= 1; i--)
1252 if (tmp & (1 << i))
1253 break;
1254
1255 /* we require at least ATA-3 */
1256 if (i < 3) {
1257 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
1258 goto err_out_nosup;
1259 }
1260
1261 if (ata_id_has_lba48(dev->id)) {
1262 dev->flags |= ATA_DFLAG_LBA48;
1263 dev->n_sectors = ata_id_u64(dev->id, 100);
1264 } else {
1265 dev->n_sectors = ata_id_u32(dev->id, 60);
1266 }
1267
1268 ap->host->max_cmd_len = 16;
1269
1270 /* print device info to dmesg */
1271 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1272 ap->id, device,
1273 ata_mode_string(xfer_modes),
1274 (unsigned long long)dev->n_sectors,
1275 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1276 }
1277
1278 /* ATAPI-specific feature tests */
1279 else {
1280 if (ata_id_is_ata(dev->id)) /* sanity check */
1281 goto err_out_nosup;
1282
1283 rc = atapi_cdb_len(dev->id);
1284 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1285 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1286 goto err_out_nosup;
1287 }
1288 ap->cdb_len = (unsigned int) rc;
1289 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1290
1291 /* print device info to dmesg */
1292 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1293 ap->id, device,
1294 ata_mode_string(xfer_modes));
1295 }
1296
1297 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1298 return;
1299
1300 err_out_nosup:
1301 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1302 ap->id, device);
1303 err_out:
1304 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1305 DPRINTK("EXIT, err\n");
1306 }
1307
1308
1309 static inline u8 ata_dev_knobble(struct ata_port *ap)
1310 {
1311 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1312 }
1313
1314 /**
1315  * ata_dev_config - Run device-specific handlers and apply
1316  * SATA->PATA bridge limits
1317  * @ap: Port on which the device resides
1318  * @i: Device index, starting at zero
1319  *
1320  * LOCKING: Inherited from caller.
1321 */
1322
1323 void ata_dev_config(struct ata_port *ap, unsigned int i)
1324 {
1325 /* limit bridge transfers to udma5, 200 sectors */
1326 if (ata_dev_knobble(ap)) {
1327 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1328 ap->id, ap->device->devno);
1329 ap->udma_mask &= ATA_UDMA5;
1330 ap->host->max_sectors = ATA_MAX_SECTORS;
1331 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1332 ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
1333 }
1334
1335 if (ap->ops->dev_config)
1336 ap->ops->dev_config(ap, &ap->device[i]);
1337 }
1338
1339 /**
1340 * ata_bus_probe - Reset and probe ATA bus
1341 * @ap: Bus to probe
1342 *
1343 * Master ATA bus probing function. Initiates a hardware-dependent
1344 * bus reset, then attempts to identify any devices found on
1345 * the bus.
1346 *
1347 * LOCKING:
1348 * PCI/etc. bus probe sem.
1349 *
1350 * RETURNS:
1351 * Zero on success, non-zero on error.
1352 */
1353
1354 static int ata_bus_probe(struct ata_port *ap)
1355 {
1356 unsigned int i, found = 0;
1357
1358 ap->ops->phy_reset(ap);
1359 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1360 goto err_out;
1361
1362 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1363 ata_dev_identify(ap, i);
1364 if (ata_dev_present(&ap->device[i])) {
1365 found = 1;
1366 ata_dev_config(ap,i);
1367 }
1368 }
1369
1370 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1371 goto err_out_disable;
1372
1373 ata_set_mode(ap);
1374 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1375 goto err_out_disable;
1376
1377 return 0;
1378
1379 err_out_disable:
1380 ap->ops->port_disable(ap);
1381 err_out:
1382 return -1;
1383 }
1384
1385 /**
1386 * ata_port_probe - Mark port as enabled
1387 * @ap: Port for which we indicate enablement
1388 *
1389 * Modify @ap data structure such that the system
1390 * thinks that the entire port is enabled.
1391 *
1392 * LOCKING: host_set lock, or some other form of
1393 * serialization.
1394 */
1395
1396 void ata_port_probe(struct ata_port *ap)
1397 {
1398 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1399 }
1400
1401 /**
1402 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1403 * @ap: SATA port associated with target SATA PHY.
1404 *
1405 * This function issues commands to standard SATA Sxxx
1406 * PHY registers, to wake up the phy (and device), and
1407 * clear any reset condition.
1408 *
1409 * LOCKING:
1410 * PCI/etc. bus probe sem.
1411 *
1412 */
1413 void __sata_phy_reset(struct ata_port *ap)
1414 {
1415 u32 sstatus;
1416 unsigned long timeout = jiffies + (HZ * 5);
1417
1418 if (ap->flags & ATA_FLAG_SATA_RESET) {
1419 /* issue phy wake/reset */
1420 scr_write_flush(ap, SCR_CONTROL, 0x301);
1421 /* Couldn't find anything in SATA I/II specs, but
1422 * AHCI-1.1 10.4.2 says at least 1 ms. */
1423 mdelay(1);
1424 }
1425 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1426
1427 /* wait for phy to become ready, if necessary */
1428 do {
1429 msleep(200);
1430 sstatus = scr_read(ap, SCR_STATUS);
1431 if ((sstatus & 0xf) != 1)
1432 break;
1433 } while (time_before(jiffies, timeout));
1434
1435 /* TODO: phy layer with polling, timeouts, etc. */
1436 if (sata_dev_present(ap))
1437 ata_port_probe(ap);
1438 else {
1439 sstatus = scr_read(ap, SCR_STATUS);
1440 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1441 ap->id, sstatus);
1442 ata_port_disable(ap);
1443 }
1444
1445 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1446 return;
1447
1448 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1449 ata_port_disable(ap);
1450 return;
1451 }
1452
1453 ap->cbl = ATA_CBL_SATA;
1454 }
1455
1456 /**
1457 * sata_phy_reset - Reset SATA bus.
1458 * @ap: SATA port associated with target SATA PHY.
1459 *
1460 * This function resets the SATA bus, and then probes
1461 * the bus for devices.
1462 *
1463 * LOCKING:
1464 * PCI/etc. bus probe sem.
1465 *
1466 */
1467 void sata_phy_reset(struct ata_port *ap)
1468 {
1469 __sata_phy_reset(ap);
1470 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1471 return;
1472 ata_bus_reset(ap);
1473 }
1474
1475 /**
1476 * ata_port_disable - Disable port.
1477 * @ap: Port to be disabled.
1478 *
1479 * Modify @ap data structure such that the system
1480 * thinks that the entire port is disabled, and should
1481 * never attempt to probe or communicate with devices
1482 * on this port.
1483 *
1484 * LOCKING: host_set lock, or some other form of
1485 * serialization.
1486 */
1487
1488 void ata_port_disable(struct ata_port *ap)
1489 {
1490 ap->device[0].class = ATA_DEV_NONE;
1491 ap->device[1].class = ATA_DEV_NONE;
1492 ap->flags |= ATA_FLAG_PORT_DISABLED;
1493 }
1494
1495 static struct {
1496 unsigned int shift;
1497 u8 base;
1498 } xfer_mode_classes[] = {
1499 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1500 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1501 { ATA_SHIFT_PIO, XFER_PIO_0 },
1502 };
1503
1504 static inline u8 base_from_shift(unsigned int shift)
1505 {
1506 int i;
1507
1508 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1509 if (xfer_mode_classes[i].shift == shift)
1510 return xfer_mode_classes[i].base;
1511
1512 return 0xff;
1513 }
1514
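/*
 * Worked example: a SET FEATURES - XFER MODE code is "class base plus
 * mode number". For UDMA5 the base is base_from_shift(ATA_SHIFT_UDMA)
 * == XFER_UDMA_0 and the mode number is the highest set bit of the
 * negotiated mask, so xfer_mode == XFER_UDMA_0 + 5. ata_dev_set_mode()
 * below runs the same arithmetic in reverse to index xfer_mode_str[].
 */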
1515 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1516 {
1517 int ofs, idx;
1518 u8 base;
1519
1520 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1521 return;
1522
1523 if (dev->xfer_shift == ATA_SHIFT_PIO)
1524 dev->flags |= ATA_DFLAG_PIO;
1525
1526 ata_dev_set_xfermode(ap, dev);
1527
1528 base = base_from_shift(dev->xfer_shift);
1529 ofs = dev->xfer_mode - base;
1530 idx = ofs + dev->xfer_shift;
1531 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1532
1533 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1534 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1535
1536 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1537 ap->id, dev->devno, xfer_mode_str[idx]);
1538 }
1539
1540 static int ata_host_set_pio(struct ata_port *ap)
1541 {
1542 unsigned int mask;
1543 int x, i;
1544 u8 base, xfer_mode;
1545
1546 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1547 x = fgb(mask);
1548 if (x < 0) {
1549 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1550 return -1;
1551 }
1552
1553 base = base_from_shift(ATA_SHIFT_PIO);
1554 xfer_mode = base + x;
1555
1556 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1557 (int)base, (int)xfer_mode, mask, x);
1558
1559 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1560 struct ata_device *dev = &ap->device[i];
1561 if (ata_dev_present(dev)) {
1562 dev->pio_mode = xfer_mode;
1563 dev->xfer_mode = xfer_mode;
1564 dev->xfer_shift = ATA_SHIFT_PIO;
1565 if (ap->ops->set_piomode)
1566 ap->ops->set_piomode(ap, dev);
1567 }
1568 }
1569
1570 return 0;
1571 }
1572
1573 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1574 unsigned int xfer_shift)
1575 {
1576 int i;
1577
1578 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1579 struct ata_device *dev = &ap->device[i];
1580 if (ata_dev_present(dev)) {
1581 dev->dma_mode = xfer_mode;
1582 dev->xfer_mode = xfer_mode;
1583 dev->xfer_shift = xfer_shift;
1584 if (ap->ops->set_dmamode)
1585 ap->ops->set_dmamode(ap, dev);
1586 }
1587 }
1588 }
1589
1590 /**
1591 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1592 * @ap: port on which timings will be programmed
1593 *
1594 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1595 *
1596 * LOCKING:
1597 * PCI/etc. bus probe sem.
1598 *
1599 */
1600 static void ata_set_mode(struct ata_port *ap)
1601 {
1602 unsigned int i, xfer_shift;
1603 u8 xfer_mode;
1604 int rc;
1605
1606 /* step 1: always set host PIO timings */
1607 rc = ata_host_set_pio(ap);
1608 if (rc)
1609 goto err_out;
1610
1611 /* step 2: choose the best data xfer mode */
1612 xfer_mode = xfer_shift = 0;
1613 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1614 if (rc)
1615 goto err_out;
1616
1617 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1618 if (xfer_shift != ATA_SHIFT_PIO)
1619 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1620
1621 /* step 4: update devices' xfer mode */
1622 ata_dev_set_mode(ap, &ap->device[0]);
1623 ata_dev_set_mode(ap, &ap->device[1]);
1624
1625 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1626 return;
1627
1628 if (ap->ops->post_set_mode)
1629 ap->ops->post_set_mode(ap);
1630
1631 for (i = 0; i < 2; i++) {
1632 struct ata_device *dev = &ap->device[i];
1633 ata_dev_set_protocol(dev);
1634 }
1635
1636 return;
1637
1638 err_out:
1639 ata_port_disable(ap);
1640 }
1641
1642 /**
1643 * ata_busy_sleep - sleep until BSY clears, or timeout
1644 * @ap: port containing status register to be polled
1645 * @tmout_pat: impatience timeout
1646 * @tmout: overall timeout
1647 *
1648 * Sleep until ATA Status register bit BSY clears,
1649 * or a timeout occurs.
1650 *
1651 * LOCKING: None.
1652 *
1653 */
1654
1655 static unsigned int ata_busy_sleep (struct ata_port *ap,
1656 unsigned long tmout_pat,
1657 unsigned long tmout)
1658 {
1659 unsigned long timer_start, timeout;
1660 u8 status;
1661
1662 status = ata_busy_wait(ap, ATA_BUSY, 300);
1663 timer_start = jiffies;
1664 timeout = timer_start + tmout_pat;
1665 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1666 msleep(50);
1667 status = ata_busy_wait(ap, ATA_BUSY, 3);
1668 }
1669
1670 if (status & ATA_BUSY)
1671 printk(KERN_WARNING "ata%u is slow to respond, "
1672 "please be patient\n", ap->id);
1673
1674 timeout = timer_start + tmout;
1675 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1676 msleep(50);
1677 status = ata_chk_status(ap);
1678 }
1679
1680 if (status & ATA_BUSY) {
1681 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1682 ap->id, tmout / HZ);
1683 return 1;
1684 }
1685
1686 return 0;
1687 }
1688
1689 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1690 {
1691 struct ata_ioports *ioaddr = &ap->ioaddr;
1692 unsigned int dev0 = devmask & (1 << 0);
1693 unsigned int dev1 = devmask & (1 << 1);
1694 unsigned long timeout;
1695
1696 /* if device 0 was found in ata_devchk, wait for its
1697 * BSY bit to clear
1698 */
1699 if (dev0)
1700 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1701
1702 /* if device 1 was found in ata_devchk, wait for
1703 * register access, then wait for BSY to clear
1704 */
1705 timeout = jiffies + ATA_TMOUT_BOOT;
1706 while (dev1) {
1707 u8 nsect, lbal;
1708
1709 ap->ops->dev_select(ap, 1);
1710 if (ap->flags & ATA_FLAG_MMIO) {
1711 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1712 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1713 } else {
1714 nsect = inb(ioaddr->nsect_addr);
1715 lbal = inb(ioaddr->lbal_addr);
1716 }
1717 if ((nsect == 1) && (lbal == 1))
1718 break;
1719 if (time_after(jiffies, timeout)) {
1720 dev1 = 0;
1721 break;
1722 }
1723 msleep(50); /* give drive a breather */
1724 }
1725 if (dev1)
1726 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1727
1728 /* is all this really necessary? */
1729 ap->ops->dev_select(ap, 0);
1730 if (dev1)
1731 ap->ops->dev_select(ap, 1);
1732 if (dev0)
1733 ap->ops->dev_select(ap, 0);
1734 }
1735
1736 /**
1737 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1738 * @ap: Port to reset and probe
1739 *
1740 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1741 * probe the bus. Not often used these days.
1742 *
1743 * LOCKING:
1744 * PCI/etc. bus probe sem.
1745 *
1746 */
1747
1748 static unsigned int ata_bus_edd(struct ata_port *ap)
1749 {
1750 struct ata_taskfile tf;
1751
1752 /* set up execute-device-diag (bus reset) taskfile */
1753 /* also, take interrupts to a known state (disabled) */
1754 DPRINTK("execute-device-diag\n");
1755 ata_tf_init(ap, &tf, 0);
1756 tf.ctl |= ATA_NIEN;
1757 tf.command = ATA_CMD_EDD;
1758 tf.protocol = ATA_PROT_NODATA;
1759
1760 /* do bus reset */
1761 ata_tf_to_host(ap, &tf);
1762
1763  /* spec says at least 2ms, but who knows with those
1764  * crazy ATAPI devices...
1765 */
1766 msleep(150);
1767
1768 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1769 }
1770
1771 static unsigned int ata_bus_softreset(struct ata_port *ap,
1772 unsigned int devmask)
1773 {
1774 struct ata_ioports *ioaddr = &ap->ioaddr;
1775
1776 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1777
1778 /* software reset. causes dev0 to be selected */
1779 if (ap->flags & ATA_FLAG_MMIO) {
1780 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1781 udelay(20); /* FIXME: flush */
1782 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1783 udelay(20); /* FIXME: flush */
1784 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1785 } else {
1786 outb(ap->ctl, ioaddr->ctl_addr);
1787 udelay(10);
1788 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1789 udelay(10);
1790 outb(ap->ctl, ioaddr->ctl_addr);
1791 }
1792
1793 /* spec mandates ">= 2ms" before checking status.
1794 * We wait 150ms, because that was the magic delay used for
1795 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1796 * between when the ATA command register is written, and then
1797 * status is checked. Because waiting for "a while" before
1798 * checking status is fine, post SRST, we perform this magic
1799 * delay here as well.
1800 */
1801 msleep(150);
1802
1803 ata_bus_post_reset(ap, devmask);
1804
1805 return 0;
1806 }
1807
1808 /**
1809 * ata_bus_reset - reset host port and associated ATA channel
1810 * @ap: port to reset
1811 *
1812 * This is typically the first time we actually start issuing
1813 * commands to the ATA channel. We wait for BSY to clear, then
1814 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1815 * result. Determine what devices, if any, are on the channel
1816 * by looking at the device 0/1 error register. Look at the signature
1817 * stored in each device's taskfile registers, to determine if
1818 * the device is ATA or ATAPI.
1819 *
1820 * LOCKING:
1821 * PCI/etc. bus probe sem.
1822 * Obtains host_set lock.
1823 *
1824 * SIDE EFFECTS:
1825 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1826 */
1827
1828 void ata_bus_reset(struct ata_port *ap)
1829 {
1830 struct ata_ioports *ioaddr = &ap->ioaddr;
1831 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1832 u8 err;
1833 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1834
1835 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1836
1837 /* determine if device 0/1 are present */
1838 if (ap->flags & ATA_FLAG_SATA_RESET)
1839 dev0 = 1;
1840 else {
1841 dev0 = ata_devchk(ap, 0);
1842 if (slave_possible)
1843 dev1 = ata_devchk(ap, 1);
1844 }
1845
1846 if (dev0)
1847 devmask |= (1 << 0);
1848 if (dev1)
1849 devmask |= (1 << 1);
1850
1851 /* select device 0 again */
1852 ap->ops->dev_select(ap, 0);
1853
1854 /* issue bus reset */
1855 if (ap->flags & ATA_FLAG_SRST)
1856 rc = ata_bus_softreset(ap, devmask);
1857 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1858 /* set up device control */
1859 if (ap->flags & ATA_FLAG_MMIO)
1860 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1861 else
1862 outb(ap->ctl, ioaddr->ctl_addr);
1863 rc = ata_bus_edd(ap);
1864 }
1865
1866 if (rc)
1867 goto err_out;
1868
1869 /*
1870 * determine by signature whether we have ATA or ATAPI devices
1871 */
1872 err = ata_dev_try_classify(ap, 0);
1873 if ((slave_possible) && (err != 0x81))
1874 ata_dev_try_classify(ap, 1);
1875
1876 /* re-enable interrupts */
1877 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1878 ata_irq_on(ap);
1879
1880 /* is double-select really necessary? */
1881 if (ap->device[1].class != ATA_DEV_NONE)
1882 ap->ops->dev_select(ap, 1);
1883 if (ap->device[0].class != ATA_DEV_NONE)
1884 ap->ops->dev_select(ap, 0);
1885
1886 /* if no devices were detected, disable this port */
1887 if ((ap->device[0].class == ATA_DEV_NONE) &&
1888 (ap->device[1].class == ATA_DEV_NONE))
1889 goto err_out;
1890
1891 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1892 /* set up device control for ATA_FLAG_SATA_RESET */
1893 if (ap->flags & ATA_FLAG_MMIO)
1894 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1895 else
1896 outb(ap->ctl, ioaddr->ctl_addr);
1897 }
1898
1899 DPRINTK("EXIT\n");
1900 return;
1901
1902 err_out:
1903 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1904 ap->ops->port_disable(ap);
1905
1906 DPRINTK("EXIT\n");
1907 }
1908
1909 static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
1910 {
1911 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1912 ap->id, dev->devno);
1913 }
1914
1915 static const char * ata_dma_blacklist [] = {
1916 "WDC AC11000H",
1917 "WDC AC22100H",
1918 "WDC AC32500H",
1919 "WDC AC33100H",
1920 "WDC AC31600H",
1921 "WDC AC32100H",
1922 "WDC AC23200L",
1923 "Compaq CRD-8241B",
1924 "CRD-8400B",
1925 "CRD-8480B",
1926 "CRD-8482B",
1927 "CRD-84",
1928 "SanDisk SDP3B",
1929 "SanDisk SDP3B-64",
1930 "SANYO CD-ROM CRD",
1931 "HITACHI CDR-8",
1932 "HITACHI CDR-8335",
1933 "HITACHI CDR-8435",
1934 "Toshiba CD-ROM XM-6202B",
1935 "TOSHIBA CD-ROM XM-1702BC",
1936 "CD-532E-A",
1937 "E-IDE CD-ROM CR-840",
1938 "CD-ROM Drive/F5A",
1939 "WPI CDD-820",
1940 "SAMSUNG CD-ROM SC-148C",
1941 "SAMSUNG CD-ROM SC",
1942 "SanDisk SDP3B-64",
1943 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
1944 "_NEC DV5800A",
1945 };
1946
1947 static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1948 {
1949 unsigned char model_num[40];
1950 char *s;
1951 unsigned int len;
1952 int i;
1953
1954 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
1955 sizeof(model_num));
1956 s = &model_num[0];
1957 len = strnlen(s, sizeof(model_num));
1958
1959 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1960 while ((len > 0) && (s[len - 1] == ' ')) {
1961 len--;
1962 s[len] = 0;
1963 }
1964
1965 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
1966 if (!strncmp(ata_dma_blacklist[i], s, len))
1967 return 1;
1968
1969 return 0;
1970 }
1971
1972 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1973 {
1974 struct ata_device *master, *slave;
1975 unsigned int mask;
1976
1977 master = &ap->device[0];
1978 slave = &ap->device[1];
1979
1980 assert (ata_dev_present(master) || ata_dev_present(slave));
1981
1982 if (shift == ATA_SHIFT_UDMA) {
1983 mask = ap->udma_mask;
1984 if (ata_dev_present(master)) {
1985 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
1986 if (ata_dma_blacklisted(ap, master)) {
1987 mask = 0;
1988 ata_pr_blacklisted(ap, master);
1989 }
1990 }
1991 if (ata_dev_present(slave)) {
1992 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
1993 if (ata_dma_blacklisted(ap, slave)) {
1994 mask = 0;
1995 ata_pr_blacklisted(ap, slave);
1996 }
1997 }
1998 }
1999 else if (shift == ATA_SHIFT_MWDMA) {
2000 mask = ap->mwdma_mask;
2001 if (ata_dev_present(master)) {
2002 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2003 if (ata_dma_blacklisted(ap, master)) {
2004 mask = 0;
2005 ata_pr_blacklisted(ap, master);
2006 }
2007 }
2008 if (ata_dev_present(slave)) {
2009 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2010 if (ata_dma_blacklisted(ap, slave)) {
2011 mask = 0;
2012 ata_pr_blacklisted(ap, slave);
2013 }
2014 }
2015 }
2016 else if (shift == ATA_SHIFT_PIO) {
2017 mask = ap->pio_mask;
2018 if (ata_dev_present(master)) {
2019 /* spec doesn't return explicit support for
2020 * PIO0-2, so we fake it
2021 */
2022 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2023 tmp_mode <<= 3;
2024 tmp_mode |= 0x7;
2025 mask &= tmp_mode;
2026 }
2027 if (ata_dev_present(slave)) {
2028 /* spec doesn't return explicit support for
2029 * PIO0-2, so we fake it
2030 */
2031 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2032 tmp_mode <<= 3;
2033 tmp_mode |= 0x7;
2034 mask &= tmp_mode;
2035 }
2036 }
2037 else {
2038 mask = 0xffffffff; /* shut up compiler warning */
2039 BUG();
2040 }
2041
2042 return mask;
2043 }
2044
2045 /* find greatest bit */
2046 static int fgb(u32 bitmap)
2047 {
2048 unsigned int i;
2049 int x = -1;
2050
2051 for (i = 0; i < 32; i++)
2052 if (bitmap & (1 << i))
2053 x = i;
2054
2055 return x;
2056 }
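/*
 * Worked example: fgb(0x14) sees bits 2 and 4 set and returns 4, the
 * highest; fgb(0) returns -1, which callers treat as "no mode
 * available".
 */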
2057
2058 /**
2059 * ata_choose_xfer_mode - attempt to find best transfer mode
2060 * @ap: Port for which an xfer mode will be selected
2061 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2062 * @xfer_shift_out: (output) bit shift that selects this mode
2063 *
2064 * Based on host and device capabilities, determine the
2065 * maximum transfer mode that is amenable to all.
2066 *
2067 * LOCKING:
2068 * PCI/etc. bus probe sem.
2069 *
2070 * RETURNS:
2071 * Zero on success, negative on error.
2072 */
2073
2074 static int ata_choose_xfer_mode(struct ata_port *ap,
2075 u8 *xfer_mode_out,
2076 unsigned int *xfer_shift_out)
2077 {
2078 unsigned int mask, shift;
2079 int x, i;
2080
2081 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2082 shift = xfer_mode_classes[i].shift;
2083 mask = ata_get_mode_mask(ap, shift);
2084
2085 x = fgb(mask);
2086 if (x >= 0) {
2087 *xfer_mode_out = xfer_mode_classes[i].base + x;
2088 *xfer_shift_out = shift;
2089 return 0;
2090 }
2091 }
2092
2093 return -1;
2094 }
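/*
 * Worked example (assuming the usual ordering of xfer_mode_classes:
 * UDMA, then MWDMA, then PIO): if ata_get_mode_mask() yields 0x07 for
 * the UDMA class -- host and both devices support modes 0-2 -- fgb()
 * returns 2 and the result is base + 2, i.e. UDMA/33.  Only when an
 * entire class masks to zero does the loop fall through to the next,
 * slower class.
 */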
2095
2096 /**
2097 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2098 * @ap: Port associated with device @dev
2099 * @dev: Device to which command will be sent
2100 *
2101 * Issue SET FEATURES - XFER MODE command to device @dev
2102 * on port @ap.
2103 *
2104 * LOCKING:
2105 * PCI/etc. bus probe sem.
2106 */
2107
2108 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2109 {
2110 DECLARE_COMPLETION(wait);
2111 struct ata_queued_cmd *qc;
2112 int rc;
2113 unsigned long flags;
2114
2115 /* set up set-features taskfile */
2116 DPRINTK("set features - xfer mode\n");
2117
2118 qc = ata_qc_new_init(ap, dev);
2119 BUG_ON(qc == NULL);
2120
2121 qc->tf.command = ATA_CMD_SET_FEATURES;
2122 qc->tf.feature = SETFEATURES_XFER;
2123 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2124 qc->tf.protocol = ATA_PROT_NODATA;
2125 qc->tf.nsect = dev->xfer_mode;
2126
2127 qc->waiting = &wait;
2128 qc->complete_fn = ata_qc_complete_noop;
2129
2130 spin_lock_irqsave(&ap->host_set->lock, flags);
2131 rc = ata_qc_issue(qc);
2132 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2133
2134 if (rc)
2135 ata_port_disable(ap);
2136 else
2137 wait_for_completion(&wait);
2138
2139 DPRINTK("EXIT\n");
2140 }
2141
2142 /**
2143 * ata_sg_clean - Unmap DMA memory associated with command
2144 * @qc: Command containing DMA memory to be released
2145 *
2146 * Unmap all mapped DMA memory associated with this command.
2147 *
2148 * LOCKING:
2149 * spin_lock_irqsave(host_set lock)
2150 */
2151
2152 static void ata_sg_clean(struct ata_queued_cmd *qc)
2153 {
2154 struct ata_port *ap = qc->ap;
2155 struct scatterlist *sg = qc->sg;
2156 int dir = qc->dma_dir;
2157
2158 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2159 assert(sg != NULL);
2160
2161 if (qc->flags & ATA_QCFLAG_SINGLE)
2162 assert(qc->n_elem == 1);
2163
2164 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
2165
2166 if (qc->flags & ATA_QCFLAG_SG)
2167 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2168 else
2169 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2170 sg_dma_len(&sg[0]), dir);
2171
2172 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2173 qc->sg = NULL;
2174 }
2175
2176 /**
2177 * ata_fill_sg - Fill PCI IDE PRD table
2178 * @qc: Metadata associated with taskfile to be transferred
2179 *
2180 * Fill PCI IDE PRD (scatter-gather) table with segments
2181 * associated with the current disk command.
2182 *
2183 * LOCKING:
2184 * spin_lock_irqsave(host_set lock)
2185 *
2186 */
2187 static void ata_fill_sg(struct ata_queued_cmd *qc)
2188 {
2189 struct scatterlist *sg = qc->sg;
2190 struct ata_port *ap = qc->ap;
2191 unsigned int idx, nelem;
2192
2193 assert(sg != NULL);
2194 assert(qc->n_elem > 0);
2195
2196 idx = 0;
2197 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2198 u32 addr, offset;
2199 u32 sg_len, len;
2200
2201 /* determine if physical DMA addr spans 64K boundary.
2202 * Note h/w doesn't support 64-bit, so we unconditionally
2203 * truncate dma_addr_t to u32.
2204 */
2205 addr = (u32) sg_dma_address(sg);
2206 sg_len = sg_dma_len(sg);
2207
2208 while (sg_len) {
2209 offset = addr & 0xffff;
2210 len = sg_len;
2211 if ((offset + sg_len) > 0x10000)
2212 len = 0x10000 - offset;
2213
2214 ap->prd[idx].addr = cpu_to_le32(addr);
2215 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2216 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2217
2218 idx++;
2219 sg_len -= len;
2220 addr += len;
2221 }
2222 }
2223
2224 if (idx)
2225 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2226 }
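/*
 * Worked example: a 0x3000-byte segment at bus address 0xf000 crosses
 * a 64KB boundary, so the loop above splits it into two PRD entries,
 * (0xf000, 0x1000) and (0x10000, 0x2000), and ATA_PRD_EOT is then
 * OR'd into the last entry emitted.
 */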
2227 /**
2228 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2229 * @qc: Metadata associated with taskfile to check
2230 *
2231 * Allow low-level driver to filter ATA PACKET commands, returning
2232 * a status indicating whether or not it is OK to use DMA for the
2233 * supplied PACKET command.
2234 *
2235 * LOCKING:
2236 * spin_lock_irqsave(host_set lock)
2237 *
2238 * RETURNS: 0 when ATAPI DMA can be used
2239 * nonzero otherwise
2240 */
2241 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2242 {
2243 struct ata_port *ap = qc->ap;
2244 int rc = 0; /* Assume ATAPI DMA is OK by default */
2245
2246 if (ap->ops->check_atapi_dma)
2247 rc = ap->ops->check_atapi_dma(qc);
2248
2249 return rc;
2250 }
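/*
 * Illustrative sketch of such a filter (hypothetical "foo" driver;
 * name and policy invented): an LLD whose DMA engine mishandles short
 * CDBs could refuse DMA for them and fall back to PIO:
 *
 *	static int foo_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		return (qc->scsicmd && qc->scsicmd->cmd_len < 12);
 *	}
 *
 * wired up as the .check_atapi_dma entry in its ata_port_operations;
 * any nonzero return forces the command to PIO.
 */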
2251 /**
2252 * ata_qc_prep - Prepare taskfile for submission
2253 * @qc: Metadata associated with taskfile to be prepared
2254 *
2255 * Prepare ATA taskfile for submission.
2256 *
2257 * LOCKING:
2258 * spin_lock_irqsave(host_set lock)
2259 */
2260 void ata_qc_prep(struct ata_queued_cmd *qc)
2261 {
2262 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2263 return;
2264
2265 ata_fill_sg(qc);
2266 }
2267
2268 /**
2269 * ata_sg_init_one - Associate command with memory buffer
2270 * @qc: Command to be associated
2271 * @buf: Memory buffer
2272 * @buflen: Length of memory buffer, in bytes.
2273 *
2274 * Initialize the data-related elements of queued_cmd @qc
2275 * to point to a single memory buffer, @buf of byte length @buflen.
2276 *
2277 * LOCKING:
2278 * spin_lock_irqsave(host_set lock)
2279 */
2280
2281 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2282 {
2283 struct scatterlist *sg;
2284
2285 qc->flags |= ATA_QCFLAG_SINGLE;
2286
2287 memset(&qc->sgent, 0, sizeof(qc->sgent));
2288 qc->sg = &qc->sgent;
2289 qc->n_elem = 1;
2290 qc->buf_virt = buf;
2291
2292 sg = qc->sg;
2293 sg->page = virt_to_page(buf);
2294 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2295 sg->length = buflen;
2296 }
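/*
 * Usage sketch, mirroring atapi_request_sense() later in this file:
 *
 *	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *
 * Nothing is DMA-mapped here; ata_sg_setup_one() performs the actual
 * dma_map_single() when the command is issued.
 */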
2297
2298 /**
2299 * ata_sg_init - Associate command with scatter-gather table.
2300 * @qc: Command to be associated
2301 * @sg: Scatter-gather table.
2302 * @n_elem: Number of elements in s/g table.
2303 *
2304 * Initialize the data-related elements of queued_cmd @qc
2305 * to point to a scatter-gather table @sg, containing @n_elem
2306 * elements.
2307 *
2308 * LOCKING:
2309 * spin_lock_irqsave(host_set lock)
2310 */
2311
2312 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2313 unsigned int n_elem)
2314 {
2315 qc->flags |= ATA_QCFLAG_SG;
2316 qc->sg = sg;
2317 qc->n_elem = n_elem;
2318 }
2319
2320 /**
2321 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2322 * @qc: Command with memory buffer to be mapped.
2323 *
2324 * DMA-map the memory buffer associated with queued_cmd @qc.
2325 *
2326 * LOCKING:
2327 * spin_lock_irqsave(host_set lock)
2328 *
2329 * RETURNS:
2330 * Zero on success, negative on error.
2331 */
2332
2333 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2334 {
2335 struct ata_port *ap = qc->ap;
2336 int dir = qc->dma_dir;
2337 struct scatterlist *sg = qc->sg;
2338 dma_addr_t dma_address;
2339
2340 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2341 sg->length, dir);
2342 if (dma_mapping_error(dma_address))
2343 return -1;
2344
2345 sg_dma_address(sg) = dma_address;
2346 sg_dma_len(sg) = sg->length;
2347
2348 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2349 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2350
2351 return 0;
2352 }
2353
2354 /**
2355 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2356 * @qc: Command with scatter-gather table to be mapped.
2357 *
2358 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2359 *
2360 * LOCKING:
2361 * spin_lock_irqsave(host_set lock)
2362 *
2363 * RETURNS:
2364 * Zero on success, negative on error.
2365 *
2366 */
2367
2368 static int ata_sg_setup(struct ata_queued_cmd *qc)
2369 {
2370 struct ata_port *ap = qc->ap;
2371 struct scatterlist *sg = qc->sg;
2372 int n_elem, dir;
2373
2374 VPRINTK("ENTER, ata%u\n", ap->id);
2375 assert(qc->flags & ATA_QCFLAG_SG);
2376
2377 dir = qc->dma_dir;
2378 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2379 if (n_elem < 1)
2380 return -1;
2381
2382 DPRINTK("%d sg elements mapped\n", n_elem);
2383
2384 qc->n_elem = n_elem;
2385
2386 return 0;
2387 }
2388
2389 /**
2390 * ata_pio_poll -
2391 * @ap:
2392 *
2393 * LOCKING:
2394 * None. (executing in kernel thread context)
2395 *
2396 * RETURNS:
2397 *
2398 */
2399
2400 static unsigned long ata_pio_poll(struct ata_port *ap)
2401 {
2402 u8 status;
2403 unsigned int poll_state = PIO_ST_UNKNOWN;
2404 unsigned int reg_state = PIO_ST_UNKNOWN;
2405 const unsigned int tmout_state = PIO_ST_TMOUT;
2406
2407 switch (ap->pio_task_state) {
2408 case PIO_ST:
2409 case PIO_ST_POLL:
2410 poll_state = PIO_ST_POLL;
2411 reg_state = PIO_ST;
2412 break;
2413 case PIO_ST_LAST:
2414 case PIO_ST_LAST_POLL:
2415 poll_state = PIO_ST_LAST_POLL;
2416 reg_state = PIO_ST_LAST;
2417 break;
2418 default:
2419 BUG();
2420 break;
2421 }
2422
2423 status = ata_chk_status(ap);
2424 if (status & ATA_BUSY) {
2425 if (time_after(jiffies, ap->pio_task_timeout)) {
2426 ap->pio_task_state = tmout_state;
2427 return 0;
2428 }
2429 ap->pio_task_state = poll_state;
2430 return ATA_SHORT_PAUSE;
2431 }
2432
2433 ap->pio_task_state = reg_state;
2434 return 0;
2435 }
2436
2437 /**
2438 * ata_pio_complete - finish the final stage of a polled-PIO command
2439 * @ap: port on which the command is executing
2440 *
2441 * LOCKING:
2442 * None. (executing in kernel thread context)
2443 */
2444
2445 static void ata_pio_complete (struct ata_port *ap)
2446 {
2447 struct ata_queued_cmd *qc;
2448 u8 drv_stat;
2449
2450 /*
2451 * This is purely heuristic. This is a fast path.
2452 * Sometimes when we enter, BSY will be cleared in
2453 * a chk-status or two. If not, the drive is probably seeking
2454 * or something. Snooze for a couple msecs, then
2455 * chk-status again. If still busy, fall back to
2456 * PIO_ST_POLL state.
2457 */
2458 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2459 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2460 msleep(2);
2461 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2462 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2463 ap->pio_task_state = PIO_ST_LAST_POLL;
2464 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2465 return;
2466 }
2467 }
2468
2469 drv_stat = ata_wait_idle(ap);
2470 if (!ata_ok(drv_stat)) {
2471 ap->pio_task_state = PIO_ST_ERR;
2472 return;
2473 }
2474
2475 qc = ata_qc_from_tag(ap, ap->active_tag);
2476 assert(qc != NULL);
2477
2478 ap->pio_task_state = PIO_ST_IDLE;
2479
2480 ata_irq_on(ap);
2481
2482 ata_qc_complete(qc, drv_stat);
2483 }
2484
2485
2486 /**
2487 * swap_buf_le16 - swap halves of 16-bit words in place
2488 * @buf: Buffer to swap
2489 * @buf_words: Number of 16-bit words in buffer.
2490 *
2491 * Swap halves of 16-bit words if needed to convert from
2492 * little-endian byte order to native cpu byte order, or
2493 * vice-versa.
2494 *
2495 * LOCKING: Inherited from caller.
2496 */
2497 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2498 {
2499 #ifdef __BIG_ENDIAN
2500 unsigned int i;
2501
2502 for (i = 0; i < buf_words; i++)
2503 buf[i] = le16_to_cpu(buf[i]);
2504 #endif /* __BIG_ENDIAN */
2505 }
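/*
 * Example: IDENTIFY data is defined as little-endian 16-bit words, so
 * a word stored on the wire as 0x1234 loads as 0x3412 on a big-endian
 * CPU until this swap rewrites it in place; on little-endian machines
 * the function body compiles away entirely.
 */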
2506
2507 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2508 unsigned int buflen, int write_data)
2509 {
2510 unsigned int i;
2511 unsigned int words = buflen >> 1;
2512 u16 *buf16 = (u16 *) buf;
2513 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2514
2515 if (write_data) {
2516 for (i = 0; i < words; i++)
2517 writew(le16_to_cpu(buf16[i]), mmio);
2518 } else {
2519 for (i = 0; i < words; i++)
2520 buf16[i] = cpu_to_le16(readw(mmio));
2521 }
2522 }
2523
2524 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2525 unsigned int buflen, int write_data)
2526 {
2527 unsigned int words = buflen >> 1; /* buflen is in bytes; transfer 16-bit words */
2528
2529 if (write_data)
2530 outsw(ap->ioaddr.data_addr, buf, words);
2531 else
2532 insw(ap->ioaddr.data_addr, buf, words);
2533 }
2534
2535 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2536 unsigned int buflen, int do_write)
2537 {
2538 if (ap->flags & ATA_FLAG_MMIO)
2539 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2540 else
2541 ata_pio_data_xfer(ap, buf, buflen, do_write);
2542 }
2543
2544 static void ata_pio_sector(struct ata_queued_cmd *qc)
2545 {
2546 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2547 struct scatterlist *sg = qc->sg;
2548 struct ata_port *ap = qc->ap;
2549 struct page *page;
2550 unsigned int offset;
2551 unsigned char *buf;
2552
2553 if (qc->cursect == (qc->nsect - 1))
2554 ap->pio_task_state = PIO_ST_LAST;
2555
2556 page = sg[qc->cursg].page;
2557 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2558
2559 /* get the current page and offset */
2560 page = nth_page(page, (offset >> PAGE_SHIFT));
2561 offset %= PAGE_SIZE;
2562
2563 buf = kmap(page) + offset;
2564
2565 qc->cursect++;
2566 qc->cursg_ofs++;
2567
2568 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2569 qc->cursg++;
2570 qc->cursg_ofs = 0;
2571 }
2572
2573 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2574
2575 /* do the actual data transfer */
2576 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2577 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
2578
2579 kunmap(page);
2580 }
2581
2582 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2583 {
2584 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2585 struct scatterlist *sg = qc->sg;
2586 struct ata_port *ap = qc->ap;
2587 struct page *page;
2588 unsigned char *buf;
2589 unsigned int offset, count;
2590
2591 if (qc->curbytes == qc->nbytes - bytes)
2592 ap->pio_task_state = PIO_ST_LAST;
2593
2594 next_sg:
2595 sg = &qc->sg[qc->cursg];
2596
2597 page = sg->page;
2598 offset = sg->offset + qc->cursg_ofs;
2599
2600 /* get the current page and offset */
2601 page = nth_page(page, (offset >> PAGE_SHIFT));
2602 offset %= PAGE_SIZE;
2603
2604 /* don't overrun current sg */
2605 count = min(sg->length - qc->cursg_ofs, bytes);
2606
2607 /* don't cross page boundaries */
2608 count = min(count, (unsigned int)PAGE_SIZE - offset);
2609
2610 buf = kmap(page) + offset;
2611
2612 bytes -= count;
2613 qc->curbytes += count;
2614 qc->cursg_ofs += count;
2615
2616 if (qc->cursg_ofs == sg->length) {
2617 qc->cursg++;
2618 qc->cursg_ofs = 0;
2619 }
2620
2621 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2622
2623 /* do the actual data transfer */
2624 ata_data_xfer(ap, buf, count, do_write);
2625
2626 kunmap(page);
2627
2628 if (bytes) {
2629 goto next_sg;
2630 }
2631 }
2632
2633 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2634 {
2635 struct ata_port *ap = qc->ap;
2636 struct ata_device *dev = qc->dev;
2637 unsigned int ireason, bc_lo, bc_hi, bytes;
2638 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2639
2640 ap->ops->tf_read(ap, &qc->tf);
2641 ireason = qc->tf.nsect;
2642 bc_lo = qc->tf.lbam;
2643 bc_hi = qc->tf.lbah;
2644 bytes = (bc_hi << 8) | bc_lo;
2645
2646 /* shall be cleared to zero, indicating xfer of data */
2647 if (ireason & (1 << 0))
2648 goto err_out;
2649
2650 /* make sure transfer direction matches expected */
2651 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2652 if (do_write != i_write)
2653 goto err_out;
2654
2655 __atapi_pio_bytes(qc, bytes);
2656
2657 return;
2658
2659 err_out:
2660 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2661 ap->id, dev->devno);
2662 ap->pio_task_state = PIO_ST_ERR;
2663 }
2664
2665 /**
2666 * ata_pio_block - start the next block of a polled-PIO command
2667 * @ap: port on which the command is executing
2668 *
2669 * LOCKING:
2670 * None. (executing in kernel thread context)
2671 */
2672
2673 static void ata_pio_block(struct ata_port *ap)
2674 {
2675 struct ata_queued_cmd *qc;
2676 u8 status;
2677
2678 /*
2679 * This is purely heuristic. This is a fast path.
2680 * Sometimes when we enter, BSY will be cleared in
2681 * a chk-status or two. If not, the drive is probably seeking
2682 * or something. Snooze for a couple msecs, then
2683 * chk-status again. If still busy, fall back to
2684 * PIO_ST_POLL state.
2685 */
2686 status = ata_busy_wait(ap, ATA_BUSY, 5);
2687 if (status & ATA_BUSY) {
2688 msleep(2);
2689 status = ata_busy_wait(ap, ATA_BUSY, 10);
2690 if (status & ATA_BUSY) {
2691 ap->pio_task_state = PIO_ST_POLL;
2692 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2693 return;
2694 }
2695 }
2696
2697 qc = ata_qc_from_tag(ap, ap->active_tag);
2698 assert(qc != NULL);
2699
2700 if (is_atapi_taskfile(&qc->tf)) {
2701 /* no more data to transfer or unsupported ATAPI command */
2702 if ((status & ATA_DRQ) == 0) {
2703 ap->pio_task_state = PIO_ST_IDLE;
2704
2705 ata_irq_on(ap);
2706
2707 ata_qc_complete(qc, status);
2708 return;
2709 }
2710
2711 atapi_pio_bytes(qc);
2712 } else {
2713 /* handle BSY=0, DRQ=0 as error */
2714 if ((status & ATA_DRQ) == 0) {
2715 ap->pio_task_state = PIO_ST_ERR;
2716 return;
2717 }
2718
2719 ata_pio_sector(qc);
2720 }
2721 }
2722
2723 static void ata_pio_error(struct ata_port *ap)
2724 {
2725 struct ata_queued_cmd *qc;
2726 u8 drv_stat;
2727
2728 qc = ata_qc_from_tag(ap, ap->active_tag);
2729 assert(qc != NULL);
2730
2731 drv_stat = ata_chk_status(ap);
2732 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2733 ap->id, drv_stat);
2734
2735 ap->pio_task_state = PIO_ST_IDLE;
2736
2737 ata_irq_on(ap);
2738
2739 ata_qc_complete(qc, drv_stat | ATA_ERR);
2740 }
2741
2742 static void ata_pio_task(void *_data)
2743 {
2744 struct ata_port *ap = _data;
2745 unsigned long timeout = 0;
2746
2747 switch (ap->pio_task_state) {
2748 case PIO_ST_IDLE:
2749 return;
2750
2751 case PIO_ST:
2752 ata_pio_block(ap);
2753 break;
2754
2755 case PIO_ST_LAST:
2756 ata_pio_complete(ap);
2757 break;
2758
2759 case PIO_ST_POLL:
2760 case PIO_ST_LAST_POLL:
2761 timeout = ata_pio_poll(ap);
2762 break;
2763
2764 case PIO_ST_TMOUT:
2765 case PIO_ST_ERR:
2766 ata_pio_error(ap);
2767 return;
2768 }
2769
2770 if (timeout)
2771 queue_delayed_work(ata_wq, &ap->pio_task,
2772 timeout);
2773 else
2774 queue_work(ata_wq, &ap->pio_task);
2775 }
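/*
 * State-machine summary (derived from the switch above): a polled
 * command starts in PIO_ST (transfer blocks), moves to PIO_ST_LAST for
 * the final status check, and detours through PIO_ST_POLL or
 * PIO_ST_LAST_POLL while the device stays busy.  PIO_ST_TMOUT and
 * PIO_ST_ERR funnel into ata_pio_error(), and every path ends back in
 * PIO_ST_IDLE; the task requeues itself on ata_wq until it gets there.
 */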
2776
2777 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2778 struct scsi_cmnd *cmd)
2779 {
2780 DECLARE_COMPLETION(wait);
2781 struct ata_queued_cmd *qc;
2782 unsigned long flags;
2783 int rc;
2784
2785 DPRINTK("ATAPI request sense\n");
2786
2787 qc = ata_qc_new_init(ap, dev);
2788 BUG_ON(qc == NULL);
2789
2790 /* FIXME: is this needed? */
2791 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2792
2793 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2794 qc->dma_dir = DMA_FROM_DEVICE;
2795
2796 memset(&qc->cdb, 0, ap->cdb_len);
2797 qc->cdb[0] = REQUEST_SENSE;
2798 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2799
2800 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2801 qc->tf.command = ATA_CMD_PACKET;
2802
2803 qc->tf.protocol = ATA_PROT_ATAPI;
2804 qc->tf.lbam = (8 * 1024) & 0xff;
2805 qc->tf.lbah = (8 * 1024) >> 8;
2806 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2807
2808 qc->waiting = &wait;
2809 qc->complete_fn = ata_qc_complete_noop;
2810
2811 spin_lock_irqsave(&ap->host_set->lock, flags);
2812 rc = ata_qc_issue(qc);
2813 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2814
2815 if (rc)
2816 ata_port_disable(ap);
2817 else
2818 wait_for_completion(&wait);
2819
2820 DPRINTK("EXIT\n");
2821 }
2822
2823 /**
2824 * ata_qc_timeout - Handle timeout of queued command
2825 * @qc: Command that timed out
2826 *
2827 * Some part of the kernel (currently, only the SCSI layer)
2828 * has noticed that the active command on this port has not
2829 * completed after a specified length of time. Handle this
2830 * condition by disabling DMA (if necessary) and completing
2831 * transactions, with error if necessary.
2832 *
2833 * This also handles the case of the "lost interrupt", where
2834 * for some reason (possibly hardware bug, possibly driver bug)
2835 * an interrupt was not delivered to the driver, even though the
2836 * transaction completed successfully.
2837 *
2838 * LOCKING:
2839 * Inherited from SCSI layer (none, can sleep)
2840 */
2841
2842 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2843 {
2844 struct ata_port *ap = qc->ap;
2845 struct ata_device *dev = qc->dev;
2846 u8 host_stat = 0, drv_stat;
2847
2848 DPRINTK("ENTER\n");
2849
2850 /* FIXME: doesn't this conflict with timeout handling? */
2851 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2852 struct scsi_cmnd *cmd = qc->scsicmd;
2853
2854 if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
2855
2856 /* finish completing original command */
2857 __ata_qc_complete(qc);
2858
2859 atapi_request_sense(ap, dev, cmd);
2860
2861 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2862 scsi_finish_command(cmd);
2863
2864 goto out;
2865 }
2866 }
2867
2868 /* hack alert! We cannot use the supplied completion
2869 * function from inside the ->eh_strategy_handler() thread.
2870 * libata is the only user of ->eh_strategy_handler() in
2871 * any kernel, so the default scsi_done() assumes it is
2872 * not being called from the SCSI EH.
2873 */
2874 qc->scsidone = scsi_finish_command;
2875
2876 switch (qc->tf.protocol) {
2877
2878 case ATA_PROT_DMA:
2879 case ATA_PROT_ATAPI_DMA:
2880 host_stat = ap->ops->bmdma_status(ap);
2881
2882 /* before we do anything else, clear DMA-Start bit */
2883 ap->ops->bmdma_stop(ap);
2884
2885 /* fall through */
2886
2887 default:
2888 ata_altstatus(ap);
2889 drv_stat = ata_chk_status(ap);
2890
2891 /* ack bmdma irq events */
2892 ap->ops->irq_clear(ap);
2893
2894 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2895 ap->id, qc->tf.command, drv_stat, host_stat);
2896
2897 /* complete taskfile transaction */
2898 ata_qc_complete(qc, drv_stat);
2899 break;
2900 }
2901 out:
2902 DPRINTK("EXIT\n");
2903 }
2904
2905 /**
2906 * ata_eng_timeout - Handle timeout of queued command
2907 * @ap: Port on which timed-out command is active
2908 *
2909 * Some part of the kernel (currently, only the SCSI layer)
2910 * has noticed that the active command on port @ap has not
2911 * completed after a specified length of time. Handle this
2912 * condition by disabling DMA (if necessary) and completing
2913 * transactions, with error if necessary.
2914 *
2915 * This also handles the case of the "lost interrupt", where
2916 * for some reason (possibly hardware bug, possibly driver bug)
2917 * an interrupt was not delivered to the driver, even though the
2918 * transaction completed successfully.
2919 *
2920 * LOCKING:
2921 * Inherited from SCSI layer (none, can sleep)
2922 */
2923
2924 void ata_eng_timeout(struct ata_port *ap)
2925 {
2926 struct ata_queued_cmd *qc;
2927
2928 DPRINTK("ENTER\n");
2929
2930 qc = ata_qc_from_tag(ap, ap->active_tag);
2931 if (!qc) {
2932 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2933 ap->id);
2934 goto out;
2935 }
2936
2937 ata_qc_timeout(qc);
2938
2939 out:
2940 DPRINTK("EXIT\n");
2941 }
2942
2943 /**
2944 * ata_qc_new - Request an available ATA command, for queueing
2945 * @ap: Port from which a free, tagged command structure is requested
2946 * (a free tag is claimed from the port's qactive bitmap)
2947 *
2948 * LOCKING:
2949 * None.
2950 */
2951
2952 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2953 {
2954 struct ata_queued_cmd *qc = NULL;
2955 unsigned int i;
2956
2957 for (i = 0; i < ATA_MAX_QUEUE; i++)
2958 if (!test_and_set_bit(i, &ap->qactive)) {
2959 qc = ata_qc_from_tag(ap, i);
2960 break;
2961 }
2962
2963 if (qc)
2964 qc->tag = i;
2965
2966 return qc;
2967 }
2968
2969 /**
2970 * ata_qc_new_init - Request an available ATA command, and initialize it
2971 * @ap: Port associated with device @dev
2972 * @dev: Device to which the command will be sent
2973 *
2974 * LOCKING:
2975 * None.
2976 */
2977
2978 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2979 struct ata_device *dev)
2980 {
2981 struct ata_queued_cmd *qc;
2982
2983 qc = ata_qc_new(ap);
2984 if (qc) {
2985 qc->sg = NULL;
2986 qc->flags = 0;
2987 qc->scsicmd = NULL;
2988 qc->ap = ap;
2989 qc->dev = dev;
2990 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2991 qc->nsect = 0;
2992 qc->nbytes = qc->curbytes = 0;
2993
2994 ata_tf_init(ap, &qc->tf, dev->devno);
2995
2996 if (dev->flags & ATA_DFLAG_LBA48)
2997 qc->tf.flags |= ATA_TFLAG_LBA48;
2998 }
2999
3000 return qc;
3001 }
3002
3003 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3004 {
3005 return 0;
3006 }
3007
3008 static void __ata_qc_complete(struct ata_queued_cmd *qc)
3009 {
3010 struct ata_port *ap = qc->ap;
3011 unsigned int tag, do_clear = 0;
3012
3013 qc->flags = 0;
3014 tag = qc->tag;
3015 if (likely(ata_tag_valid(tag))) {
3016 if (tag == ap->active_tag)
3017 ap->active_tag = ATA_TAG_POISON;
3018 qc->tag = ATA_TAG_POISON;
3019 do_clear = 1;
3020 }
3021
3022 if (qc->waiting) {
3023 struct completion *waiting = qc->waiting;
3024 qc->waiting = NULL;
3025 complete(waiting);
3026 }
3027
3028 if (likely(do_clear))
3029 clear_bit(tag, &ap->qactive);
3030 }
3031
3032 /**
3033 * ata_qc_free - free unused ata_queued_cmd
3034 * @qc: Command to complete
3035 *
3036 * Designed to free unused ata_queued_cmd object
3037 * in case something prevents using it.
3038 *
3039 * LOCKING:
3040 * spin_lock_irqsave(host_set lock)
3041 *
3042 */
3043 void ata_qc_free(struct ata_queued_cmd *qc)
3044 {
3045 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3046 assert(qc->waiting == NULL); /* nothing should be waiting */
3047
3048 __ata_qc_complete(qc);
3049 }
3050
3051 /**
3052 * ata_qc_complete - Complete an active ATA command
3053 * @qc: Command to complete
3054 * @drv_stat: ATA Status register contents
3055 *
3056 * Indicate to the mid and upper layers that an ATA
3057 * command has completed, with either an ok or not-ok status.
3058 *
3059 * LOCKING:
3060 * spin_lock_irqsave(host_set lock)
3061 *
3062 */
3063
3064 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3065 {
3066 int rc;
3067
3068 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3069 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3070
3071 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3072 ata_sg_clean(qc);
3073
3074 /* call completion callback */
3075 rc = qc->complete_fn(qc, drv_stat);
3076 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3077
3078 /* if callback indicates not to complete command (non-zero),
3079 * return immediately
3080 */
3081 if (rc != 0)
3082 return;
3083
3084 __ata_qc_complete(qc);
3085
3086 VPRINTK("EXIT\n");
3087 }
3088
3089 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3090 {
3091 struct ata_port *ap = qc->ap;
3092
3093 switch (qc->tf.protocol) {
3094 case ATA_PROT_DMA:
3095 case ATA_PROT_ATAPI_DMA:
3096 return 1;
3097
3098 case ATA_PROT_ATAPI:
3099 case ATA_PROT_PIO:
3100 case ATA_PROT_PIO_MULT:
3101 if (ap->flags & ATA_FLAG_PIO_DMA)
3102 return 1;
3103
3104 /* fall through */
3105
3106 default:
3107 return 0;
3108 }
3109
3110 /* never reached */
3111 }
3112
3113 /**
3114 * ata_qc_issue - issue taskfile to device
3115 * @qc: command to issue to device
3116 *
3117 * Prepare an ATA command for submission to a device.
3118 * This includes mapping the data into a DMA-able
3119 * area, filling in the S/G table, and finally
3120 * writing the taskfile to hardware, starting the command.
3121 *
3122 * LOCKING:
3123 * spin_lock_irqsave(host_set lock)
3124 *
3125 * RETURNS:
3126 * Zero on success, negative on error.
3127 */
3128
3129 int ata_qc_issue(struct ata_queued_cmd *qc)
3130 {
3131 struct ata_port *ap = qc->ap;
3132
3133 if (ata_should_dma_map(qc)) {
3134 if (qc->flags & ATA_QCFLAG_SG) {
3135 if (ata_sg_setup(qc))
3136 goto err_out;
3137 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3138 if (ata_sg_setup_one(qc))
3139 goto err_out;
3140 }
3141 } else {
3142 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3143 }
3144
3145 ap->ops->qc_prep(qc);
3146
3147 qc->ap->active_tag = qc->tag;
3148 qc->flags |= ATA_QCFLAG_ACTIVE;
3149
3150 return ap->ops->qc_issue(qc);
3151
3152 err_out:
3153 return -1;
3154 }
3155
3156
3157 /**
3158 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3159 * @qc: command to issue to device
3160 *
3161 * Using various libata functions and hooks, this function
3162 * starts an ATA command. ATA commands are grouped into
3163 * classes called "protocols", and issuing each type of protocol
3164 * is slightly different.
3165 *
3166 * May be used as the qc_issue() entry in ata_port_operations.
3167 *
3168 * LOCKING:
3169 * spin_lock_irqsave(host_set lock)
3170 *
3171 * RETURNS:
3172 * Zero on success, negative on error.
3173 */
3174
3175 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3176 {
3177 struct ata_port *ap = qc->ap;
3178
3179 ata_dev_select(ap, qc->dev->devno, 1, 0);
3180
3181 switch (qc->tf.protocol) {
3182 case ATA_PROT_NODATA:
3183 ata_tf_to_host_nolock(ap, &qc->tf);
3184 break;
3185
3186 case ATA_PROT_DMA:
3187 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3188 ap->ops->bmdma_setup(qc); /* set up bmdma */
3189 ap->ops->bmdma_start(qc); /* initiate bmdma */
3190 break;
3191
3192 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3193 ata_qc_set_polling(qc);
3194 ata_tf_to_host_nolock(ap, &qc->tf);
3195 ap->pio_task_state = PIO_ST;
3196 queue_work(ata_wq, &ap->pio_task);
3197 break;
3198
3199 case ATA_PROT_ATAPI:
3200 ata_qc_set_polling(qc);
3201 ata_tf_to_host_nolock(ap, &qc->tf);
3202 queue_work(ata_wq, &ap->packet_task);
3203 break;
3204
3205 case ATA_PROT_ATAPI_NODATA:
3206 ata_tf_to_host_nolock(ap, &qc->tf);
3207 queue_work(ata_wq, &ap->packet_task);
3208 break;
3209
3210 case ATA_PROT_ATAPI_DMA:
3211 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3212 ap->ops->bmdma_setup(qc); /* set up bmdma */
3213 queue_work(ata_wq, &ap->packet_task);
3214 break;
3215
3216 default:
3217 WARN_ON(1);
3218 return -1;
3219 }
3220
3221 return 0;
3222 }
3223
3224 /**
3225 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3226 * @qc: Info associated with this ATA transaction.
3227 *
3228 * LOCKING:
3229 * spin_lock_irqsave(host_set lock)
3230 */
3231
3232 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3233 {
3234 struct ata_port *ap = qc->ap;
3235 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3236 u8 dmactl;
3237 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3238
3239 /* load PRD table addr. */
3240 mb(); /* make sure PRD table writes are visible to controller */
3241 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3242
3243 /* specify data direction, triple-check start bit is clear */
3244 dmactl = readb(mmio + ATA_DMA_CMD);
3245 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3246 if (!rw)
3247 dmactl |= ATA_DMA_WR;
3248 writeb(dmactl, mmio + ATA_DMA_CMD);
3249
3250 /* issue r/w command */
3251 ap->ops->exec_command(ap, &qc->tf);
3252 }
3253
3254 /**
3255 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3256 * @qc: Info associated with this ATA transaction.
3257 *
3258 * LOCKING:
3259 * spin_lock_irqsave(host_set lock)
3260 */
3261
3262 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3263 {
3264 struct ata_port *ap = qc->ap;
3265 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3266 u8 dmactl;
3267
3268 /* start host DMA transaction */
3269 dmactl = readb(mmio + ATA_DMA_CMD);
3270 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3271
3272 /* Strictly, one may wish to issue a readb() here, to
3273 * flush the mmio write. However, control also passes
3274 * to the hardware at this point, and it will interrupt
3275 * us when we are to resume control. So, in effect,
3276 * we don't care when the mmio write flushes.
3277 * Further, a read of the DMA status register _immediately_
3278 * following the write may not be what certain flaky hardware
3279 * expects, so I think it is best not to add a readb()
3280 * without first testing all the MMIO ATA cards/mobos.
3281 * Or maybe I'm just being paranoid.
3282 */
3283 }
3284
3285 /**
3286 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3287 * @qc: Info associated with this ATA transaction.
3288 *
3289 * LOCKING:
3290 * spin_lock_irqsave(host_set lock)
3291 */
3292
3293 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3294 {
3295 struct ata_port *ap = qc->ap;
3296 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3297 u8 dmactl;
3298
3299 /* load PRD table addr. */
3300 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3301
3302 /* specify data direction, triple-check start bit is clear */
3303 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3304 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3305 if (!rw)
3306 dmactl |= ATA_DMA_WR;
3307 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3308
3309 /* issue r/w command */
3310 ap->ops->exec_command(ap, &qc->tf);
3311 }
3312
3313 /**
3314 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3315 * @qc: Info associated with this ATA transaction.
3316 *
3317 * LOCKING:
3318 * spin_lock_irqsave(host_set lock)
3319 */
3320
3321 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3322 {
3323 struct ata_port *ap = qc->ap;
3324 u8 dmactl;
3325
3326 /* start host DMA transaction */
3327 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3328 outb(dmactl | ATA_DMA_START,
3329 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3330 }
3331
3332
3333 /**
3334 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3335 * @qc: Info associated with this ATA transaction.
3336 *
3337 * Writes the ATA_DMA_START flag to the DMA command register.
3338 *
3339 * May be used as the bmdma_start() entry in ata_port_operations.
3340 *
3341 * LOCKING:
3342 * spin_lock_irqsave(host_set lock)
3343 */
3344 void ata_bmdma_start(struct ata_queued_cmd *qc)
3345 {
3346 if (qc->ap->flags & ATA_FLAG_MMIO)
3347 ata_bmdma_start_mmio(qc);
3348 else
3349 ata_bmdma_start_pio(qc);
3350 }
3351
3352
3353 /**
3354 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3355 * @qc: Info associated with this ATA transaction.
3356 *
3357 * Writes address of PRD table to device's PRD Table Address
3358 * register, sets the DMA control register, and calls
3359 * ops->exec_command() to start the transfer.
3360 *
3361 * May be used as the bmdma_setup() entry in ata_port_operations.
3362 *
3363 * LOCKING:
3364 * spin_lock_irqsave(host_set lock)
3365 */
3366 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3367 {
3368 if (qc->ap->flags & ATA_FLAG_MMIO)
3369 ata_bmdma_setup_mmio(qc);
3370 else
3371 ata_bmdma_setup_pio(qc);
3372 }
3373
3374
3375 /**
3376 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
3377 * @ap: Port associated with this ATA transaction.
3378 *
3379 * Clear interrupt and error flags in DMA status register.
3380 *
3381 * May be used as the irq_clear() entry in ata_port_operations.
3382 *
3383 * LOCKING:
3384 * spin_lock_irqsave(host_set lock)
3385 */
3386
3387 void ata_bmdma_irq_clear(struct ata_port *ap)
3388 {
3389 if (ap->flags & ATA_FLAG_MMIO) {
3390 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3391 writeb(readb(mmio), mmio);
3392 } else {
3393 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3394 outb(inb(addr), addr);
3395 }
3396
3397 }
3398
3399
3400 /**
3401 * ata_bmdma_status - Read PCI IDE BMDMA status
3402 * @ap: Port associated with this ATA transaction.
3403 *
3404 * Read and return BMDMA status register.
3405 *
3406 * May be used as the bmdma_status() entry in ata_port_operations.
3407 *
3408 * LOCKING:
3409 * spin_lock_irqsave(host_set lock)
3410 */
3411
3412 u8 ata_bmdma_status(struct ata_port *ap)
3413 {
3414 u8 host_stat;
3415 if (ap->flags & ATA_FLAG_MMIO) {
3416 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3417 host_stat = readb(mmio + ATA_DMA_STATUS);
3418 } else
3419 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3420 return host_stat;
3421 }
3422
3423
3424 /**
3425 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3426 * @ap: Port associated with this ATA transaction.
3427 *
3428 * Clears the ATA_DMA_START flag in the dma control register
3429 *
3430 * May be used as the bmdma_stop() entry in ata_port_operations.
3431 *
3432 * LOCKING:
3433 * spin_lock_irqsave(host_set lock)
3434 */
3435
3436 void ata_bmdma_stop(struct ata_port *ap)
3437 {
3438 if (ap->flags & ATA_FLAG_MMIO) {
3439 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3440
3441 /* clear start/stop bit */
3442 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3443 mmio + ATA_DMA_CMD);
3444 } else {
3445 /* clear start/stop bit */
3446 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3447 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3448 }
3449
3450 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3451 ata_altstatus(ap); /* dummy read */
3452 }
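/*
 * Illustrative sketch (hypothetical "foo" driver): a conventional PCI
 * IDE LLD can plug the five helpers above straight into its
 * ata_port_operations:
 *
 *	static struct ata_port_operations foo_ops = {
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		...
 *	};
 *
 * Each helper tests ATA_FLAG_MMIO itself, so the same ops table serves
 * both MMIO and port-I/O based controllers.
 */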
3453
3454 /**
3455 * ata_host_intr - Handle host interrupt for given (port, task)
3456 * @ap: Port on which interrupt arrived (possibly...)
3457 * @qc: Taskfile currently active in engine
3458 *
3459 * Handle host interrupt for given queued command. Currently,
3460 * only DMA interrupts are handled. All other commands are
3461 * handled via polling with interrupts disabled (nIEN bit).
3462 *
3463 * LOCKING:
3464 * spin_lock_irqsave(host_set lock)
3465 *
3466 * RETURNS:
3467 * One if interrupt was handled, zero if not (shared irq).
3468 */
3469
3470 inline unsigned int ata_host_intr (struct ata_port *ap,
3471 struct ata_queued_cmd *qc)
3472 {
3473 u8 status, host_stat;
3474
3475 switch (qc->tf.protocol) {
3476
3477 case ATA_PROT_DMA:
3478 case ATA_PROT_ATAPI_DMA:
3479 case ATA_PROT_ATAPI:
3480 /* check status of DMA engine */
3481 host_stat = ap->ops->bmdma_status(ap);
3482 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3483
3484 /* if it's not our irq... */
3485 if (!(host_stat & ATA_DMA_INTR))
3486 goto idle_irq;
3487
3488 /* before we do anything else, clear DMA-Start bit */
3489 ap->ops->bmdma_stop(ap);
3490
3491 /* fall through */
3492
3493 case ATA_PROT_ATAPI_NODATA:
3494 case ATA_PROT_NODATA:
3495 /* check altstatus */
3496 status = ata_altstatus(ap);
3497 if (status & ATA_BUSY)
3498 goto idle_irq;
3499
3500 /* check main status, clearing INTRQ */
3501 status = ata_chk_status(ap);
3502 if (unlikely(status & ATA_BUSY))
3503 goto idle_irq;
3504 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3505 ap->id, qc->tf.protocol, status);
3506
3507 /* ack bmdma irq events */
3508 ap->ops->irq_clear(ap);
3509
3510 /* complete taskfile transaction */
3511 ata_qc_complete(qc, status);
3512 break;
3513
3514 default:
3515 goto idle_irq;
3516 }
3517
3518 return 1; /* irq handled */
3519
3520 idle_irq:
3521 ap->stats.idle_irq++;
3522
3523 #ifdef ATA_IRQ_TRAP
3524 if ((ap->stats.idle_irq % 1000) == 0) {
3525 ata_irq_ack(ap, 0); /* debug trap */
3526 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3527 return 1; /* treat the trapped irq as handled */
3528 }
3529 #endif
3530 return 0; /* irq not handled */
3531 }
3532
3533 /**
3534 * ata_interrupt - Default ATA host interrupt handler
3535 * @irq: irq line (unused)
3536 * @dev_instance: pointer to our ata_host_set information structure
3537 * @regs: unused
3538 *
3539 * Default interrupt handler for PCI IDE devices. Calls
3540 * ata_host_intr() for each port that is not disabled.
3541 *
3542 * LOCKING:
3543 * Obtains host_set lock during operation.
3544 *
3545 * RETURNS:
3546 * IRQ_NONE or IRQ_HANDLED.
3547 *
3548 */
3549
3550 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3551 {
3552 struct ata_host_set *host_set = dev_instance;
3553 unsigned int i;
3554 unsigned int handled = 0;
3555 unsigned long flags;
3556
3557 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3558 spin_lock_irqsave(&host_set->lock, flags);
3559
3560 for (i = 0; i < host_set->n_ports; i++) {
3561 struct ata_port *ap;
3562
3563 ap = host_set->ports[i];
3564 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3565 struct ata_queued_cmd *qc;
3566
3567 qc = ata_qc_from_tag(ap, ap->active_tag);
3568 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
3569 (qc->flags & ATA_QCFLAG_ACTIVE))
3570 handled |= ata_host_intr(ap, qc);
3571 }
3572 }
3573
3574 spin_unlock_irqrestore(&host_set->lock, flags);
3575
3576 return IRQ_RETVAL(handled);
3577 }
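/*
 * Illustrative sketch (hypothetical "foo" driver): an LLD does not
 * call request_irq() itself; it names this handler in its port
 * operations and ata_device_add() below registers it:
 *
 *	static struct ata_port_operations foo_ops = {
 *		...
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *	};
 */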
3578
3579 /**
3580 * atapi_packet_task - Write CDB bytes to hardware
3581 * @_data: Port to which ATAPI device is attached.
3582 *
3583 * When the device has indicated its readiness to accept
3584 * a CDB, this function is called.  Send the CDB.
3585 * If DMA is to be performed, exit immediately.
3586 * Otherwise, we are in polling mode, so poll
3587 * status until the operation succeeds or fails.
3588 *
3589 * LOCKING:
3590 * Kernel thread context (may sleep)
3591 */
3592
3593 static void atapi_packet_task(void *_data)
3594 {
3595 struct ata_port *ap = _data;
3596 struct ata_queued_cmd *qc;
3597 u8 status;
3598
3599 qc = ata_qc_from_tag(ap, ap->active_tag);
3600 assert(qc != NULL);
3601 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3602
3603 /* sleep-wait for BSY to clear */
3604 DPRINTK("busy wait\n");
3605 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3606 goto err_out;
3607
3608 /* make sure DRQ is set */
3609 status = ata_chk_status(ap);
3610 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3611 goto err_out;
3612
3613 /* send SCSI cdb */
3614 DPRINTK("send cdb\n");
3615 assert(ap->cdb_len >= 12);
3616 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3617
3618 /* if we are DMA'ing, irq handler takes over from here */
3619 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3620 ap->ops->bmdma_start(qc); /* initiate bmdma */
3621
3622 /* non-data commands are also handled via irq */
3623 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3624 /* do nothing */
3625 }
3626
3627 /* PIO commands are handled by polling */
3628 else {
3629 ap->pio_task_state = PIO_ST;
3630 queue_work(ata_wq, &ap->pio_task);
3631 }
3632
3633 return;
3634
3635 err_out:
3636 ata_qc_complete(qc, ATA_ERR);
3637 }
3638
3639
3640 /**
3641 * ata_port_start - Set port up for DMA.
3642 * @ap: Port to initialize
3643 *
3644 * Called just after data structures for each port are
3645 * initialized. Allocates space for PRD table.
3646 *
3647 * May be used as the port_start() entry in ata_port_operations.
3648 *
3649 * LOCKING: Inherited from caller.
3650 */
3651
3652 int ata_port_start (struct ata_port *ap)
3653 {
3654 struct device *dev = ap->host_set->dev;
3655
3656 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3657 if (!ap->prd)
3658 return -ENOMEM;
3659
3660 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3661
3662 return 0;
3663 }
3664
3665
3666 /**
3667 * ata_port_stop - Undo ata_port_start()
3668 * @ap: Port to shut down
3669 *
3670 * Frees the PRD table.
3671 *
3672 * May be used as the port_stop() entry in ata_port_operations.
3673 *
3674 * LOCKING: Inherited from caller.
3675 */
3676
3677 void ata_port_stop (struct ata_port *ap)
3678 {
3679 struct device *dev = ap->host_set->dev;
3680
3681 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3682 }
3683
3684 void ata_host_stop (struct ata_host_set *host_set)
3685 {
3686 if (host_set->mmio_base)
3687 iounmap(host_set->mmio_base);
3688 }
3689
3690
3691 /**
3692 * ata_host_remove - Unregister SCSI host structure with upper layers
3693 * @ap: Port to unregister
3694 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3695 *
3696 * LOCKING: Inherited from caller.
3697 */
3698
3699 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3700 {
3701 struct Scsi_Host *sh = ap->host;
3702
3703 DPRINTK("ENTER\n");
3704
3705 if (do_unregister)
3706 scsi_remove_host(sh);
3707
3708 ap->ops->port_stop(ap);
3709 }
3710
3711 /**
3712 * ata_host_init - Initialize an ata_port structure
3713 * @ap: Structure to initialize
3714 * @host: associated SCSI mid-layer structure
3715 * @host_set: Collection of hosts to which @ap belongs
3716 * @ent: Probe information provided by low-level driver
3717 * @port_no: Port number associated with this ata_port
3718 *
3719 * Initialize a new ata_port structure, and its associated
3720 * scsi_host.
3721 *
3722 * LOCKING:
3723 * Inherited from caller.
3724 *
3725 */
3726
3727 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3728 struct ata_host_set *host_set,
3729 struct ata_probe_ent *ent, unsigned int port_no)
3730 {
3731 unsigned int i;
3732
3733 host->max_id = 16;
3734 host->max_lun = 1;
3735 host->max_channel = 1;
3736 host->unique_id = ata_unique_id++;
3737 host->max_cmd_len = 12;
3738
3739 scsi_assign_lock(host, &host_set->lock);
3740
3741 ap->flags = ATA_FLAG_PORT_DISABLED;
3742 ap->id = host->unique_id;
3743 ap->host = host;
3744 ap->ctl = ATA_DEVCTL_OBS;
3745 ap->host_set = host_set;
3746 ap->port_no = port_no;
3747 ap->hard_port_no =
3748 ent->legacy_mode ? ent->hard_port_no : port_no;
3749 ap->pio_mask = ent->pio_mask;
3750 ap->mwdma_mask = ent->mwdma_mask;
3751 ap->udma_mask = ent->udma_mask;
3752 ap->flags |= ent->host_flags;
3753 ap->ops = ent->port_ops;
3754 ap->cbl = ATA_CBL_NONE;
3755 ap->active_tag = ATA_TAG_POISON;
3756 ap->last_ctl = 0xFF;
3757
3758 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3759 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3760
3761 for (i = 0; i < ATA_MAX_DEVICES; i++)
3762 ap->device[i].devno = i;
3763
3764 #ifdef ATA_IRQ_TRAP
3765 ap->stats.unhandled_irq = 1;
3766 ap->stats.idle_irq = 1;
3767 #endif
3768
3769 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3770 }
3771
3772 /**
3773 * ata_host_add - Attach low-level ATA driver to system
3774 * @ent: Information provided by low-level driver
3775 * @host_set: Collections of ports to which we add
3776 * @port_no: Port number associated with this host
3777 *
3778 * Attach low-level ATA driver to system.
3779 *
3780 * LOCKING:
3781 * PCI/etc. bus probe sem.
3782 *
3783 * RETURNS:
3784 * New ata_port on success, NULL on error.
3785 *
3786 */
3787
3788 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3789 struct ata_host_set *host_set,
3790 unsigned int port_no)
3791 {
3792 struct Scsi_Host *host;
3793 struct ata_port *ap;
3794 int rc;
3795
3796 DPRINTK("ENTER\n");
3797 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3798 if (!host)
3799 return NULL;
3800
3801 ap = (struct ata_port *) &host->hostdata[0];
3802
3803 ata_host_init(ap, host, host_set, ent, port_no);
3804
3805 rc = ap->ops->port_start(ap);
3806 if (rc)
3807 goto err_out;
3808
3809 return ap;
3810
3811 err_out:
3812 scsi_host_put(host);
3813 return NULL;
3814 }
3815
3816 /**
3817 * ata_device_add - Register hardware device with ATA and SCSI layers
3818 * @ent: Probe information describing hardware device to be registered
3819 *
3820 * This function processes the information provided in the probe
3821 * information struct @ent, allocates the necessary ATA and SCSI
3822 * host information structures, initializes them, and registers
3823 * everything with requisite kernel subsystems.
3824 *
3825 * This function requests irqs, probes the ATA bus, and probes
3826 * the SCSI bus.
3827 *
3828 * LOCKING:
3829 * PCI/etc. bus probe sem.
3830 *
3831 * RETURNS:
3832 * Number of ports registered. Zero on error (no ports registered).
3833 *
3834 */
3835
3836 int ata_device_add(struct ata_probe_ent *ent)
3837 {
3838 unsigned int count = 0, i;
3839 struct device *dev = ent->dev;
3840 struct ata_host_set *host_set;
3841
3842 DPRINTK("ENTER\n");
3843 /* alloc a container for our list of ATA ports (buses) */
3844 host_set = kmalloc(sizeof(struct ata_host_set) +
3845 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3846 if (!host_set)
3847 return 0;
3848 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3849 spin_lock_init(&host_set->lock);
3850
3851 host_set->dev = dev;
3852 host_set->n_ports = ent->n_ports;
3853 host_set->irq = ent->irq;
3854 host_set->mmio_base = ent->mmio_base;
3855 host_set->private_data = ent->private_data;
3856 host_set->ops = ent->port_ops;
3857
3858 /* register each port bound to this device */
3859 for (i = 0; i < ent->n_ports; i++) {
3860 struct ata_port *ap;
3861 unsigned long xfer_mode_mask;
3862
3863 ap = ata_host_add(ent, host_set, i);
3864 if (!ap)
3865 goto err_out;
3866
3867 host_set->ports[i] = ap;
3868 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
3869 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3870 (ap->pio_mask << ATA_SHIFT_PIO);
3871
3872 /* print per-port info to dmesg */
3873 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3874 "bmdma 0x%lX irq %lu\n",
3875 ap->id,
3876 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3877 ata_mode_string(xfer_mode_mask),
3878 ap->ioaddr.cmd_addr,
3879 ap->ioaddr.ctl_addr,
3880 ap->ioaddr.bmdma_addr,
3881 ent->irq);
3882
3883 ata_chk_status(ap);
3884 host_set->ops->irq_clear(ap);
3885 count++;
3886 }
3887
3888 if (!count) {
3889 kfree(host_set);
3890 return 0;
3891 }
3892
3893 /* obtain irq, that is shared between channels */
3894 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3895 DRV_NAME, host_set))
3896 goto err_out;
3897
3898 /* perform each probe synchronously */
3899 DPRINTK("probe begin\n");
3900 for (i = 0; i < count; i++) {
3901 struct ata_port *ap;
3902 int rc;
3903
3904 ap = host_set->ports[i];
3905
3906 DPRINTK("ata%u: probe begin\n", ap->id);
3907 rc = ata_bus_probe(ap);
3908 DPRINTK("ata%u: probe end\n", ap->id);
3909
3910 if (rc) {
3911 /* FIXME: do something useful here?
3912 * Current libata behavior will
3913 * tear down everything when
3914 * the module is removed
3915 * or the h/w is unplugged.
3916 */
3917 }
3918
3919 rc = scsi_add_host(ap->host, dev);
3920 if (rc) {
3921 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3922 ap->id);
3923 /* FIXME: do something useful here */
3924 /* FIXME: handle unconditional calls to
3925 * scsi_scan_host and ata_host_remove, below,
3926 * at the very least
3927 */
3928 }
3929 }
3930
3931 /* probes are done, now scan each port's disk(s) */
3932 DPRINTK("probe begin\n");
3933 for (i = 0; i < count; i++) {
3934 struct ata_port *ap = host_set->ports[i];
3935
3936 scsi_scan_host(ap->host);
3937 }
3938
3939 dev_set_drvdata(dev, host_set);
3940
3941 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3942 return ent->n_ports; /* success */
3943
3944 err_out:
3945 for (i = 0; i < count; i++) {
3946 ata_host_remove(host_set->ports[i], 1);
3947 scsi_host_put(host_set->ports[i]->host);
3948 }
3949 kfree(host_set);
3950 VPRINTK("EXIT, returning 0\n");
3951 return 0;
3952 }
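/*
 * Illustrative sketch (hypothetical non-PCI "foo" driver; the names
 * FOO_IRQ, FOO_CMD_BASE, FOO_CTL_BASE and foo_port_info are invented):
 * a driver that cannot use the ata_pci_* helpers below fills in a
 * probe entry by hand and registers it:
 *
 *	struct ata_probe_ent *ent =
 *		ata_probe_ent_alloc(dev, &foo_port_info);
 *	if (!ent)
 *		return -ENOMEM;
 *	ent->n_ports = 1;
 *	ent->irq = FOO_IRQ;
 *	ent->port[0].cmd_addr = FOO_CMD_BASE;
 *	ent->port[0].ctl_addr = FOO_CTL_BASE;
 *	ata_std_ports(&ent->port[0]);
 *	if (!ata_device_add(ent))
 *		... clean up, no ports were registered ...
 *	kfree(ent);
 */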
3953
3954 /**
3955 * ata_scsi_release - SCSI layer callback hook for host unload
3956 * @host: libata host to be unloaded
3957 *
3958 * Performs all duties necessary to shut down a libata port...
3959 * Kill port kthread, disable port, and release resources.
3960 *
3961 * LOCKING:
3962 * Inherited from SCSI layer.
3963 *
3964 * RETURNS:
3965 * One.
3966 */
3967
3968 int ata_scsi_release(struct Scsi_Host *host)
3969 {
3970 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3971
3972 DPRINTK("ENTER\n");
3973
3974 ap->ops->port_disable(ap);
3975 ata_host_remove(ap, 0);
3976
3977 DPRINTK("EXIT\n");
3978 return 1;
3979 }
3980
3981 /**
3982 * ata_std_ports - initialize ioaddr with standard port offsets.
3983 * @ioaddr: IO address structure to be initialized
3984 *
3985 * Utility function which initializes data_addr, error_addr,
3986 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
3987 * device_addr, status_addr, and command_addr to standard offsets
3988 * relative to cmd_addr.
3989 *
3990 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
3991 */
3992
3993 void ata_std_ports(struct ata_ioports *ioaddr)
3994 {
3995 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3996 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3997 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3998 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3999 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4000 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4001 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4002 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4003 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4004 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4005 }
4006
4007 static struct ata_probe_ent *
4008 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
4009 {
4010 struct ata_probe_ent *probe_ent;
4011
4012 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
4013 if (!probe_ent) {
4014 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4015 kobject_name(&(dev->kobj)));
4016 return NULL;
4017 }
4018
4019 memset(probe_ent, 0, sizeof(*probe_ent));
4020
4021 INIT_LIST_HEAD(&probe_ent->node);
4022 probe_ent->dev = dev;
4023
4024 probe_ent->sht = port->sht;
4025 probe_ent->host_flags = port->host_flags;
4026 probe_ent->pio_mask = port->pio_mask;
4027 probe_ent->mwdma_mask = port->mwdma_mask;
4028 probe_ent->udma_mask = port->udma_mask;
4029 probe_ent->port_ops = port->port_ops;
4030
4031 return probe_ent;
4032 }
4033
4034
4035
4036 /**
4037 * ata_pci_init_native_mode - Initialize native-mode driver
4038 * @pdev: pci device to be initialized
4039 * @port: array[2] of pointers to port info structures.
4040 *
4041 * Utility function which allocates and initializes an
4042 * ata_probe_ent structure for a standard dual-port
4043 * PIO-based IDE controller. The returned ata_probe_ent
4044 * structure can be passed to ata_device_add(), and should
4045 * afterwards be freed with kfree().
4046 */
4047
4048 #ifdef CONFIG_PCI
4049 struct ata_probe_ent *
4050 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
4051 {
4052 struct ata_probe_ent *probe_ent =
4053 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4054 if (!probe_ent)
4055 return NULL;
4056
4057 probe_ent->n_ports = 2;
4058 probe_ent->irq = pdev->irq;
4059 probe_ent->irq_flags = SA_SHIRQ;
4060
4061 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
4062 probe_ent->port[0].altstatus_addr =
4063 probe_ent->port[0].ctl_addr =
4064 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4065 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4066
4067 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
4068 probe_ent->port[1].altstatus_addr =
4069 probe_ent->port[1].ctl_addr =
4070 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4071 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4072
4073 ata_std_ports(&probe_ent->port[0]);
4074 ata_std_ports(&probe_ent->port[1]);
4075
4076 return probe_ent;
4077 }
4078
4079 static struct ata_probe_ent *
4080 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4081 struct ata_probe_ent **ppe2)
4082 {
4083 struct ata_probe_ent *probe_ent, *probe_ent2;
4084
4085 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4086 if (!probe_ent)
4087 return NULL;
4088 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4089 if (!probe_ent2) {
4090 kfree(probe_ent);
4091 return NULL;
4092 }
4093
4094 probe_ent->n_ports = 1;
4095 probe_ent->irq = 14;
4096
4097 probe_ent->hard_port_no = 0;
4098 probe_ent->legacy_mode = 1;
4099
4100 probe_ent2->n_ports = 1;
4101 probe_ent2->irq = 15;
4102
4103 probe_ent2->hard_port_no = 1;
4104 probe_ent2->legacy_mode = 1;
4105
4106 probe_ent->port[0].cmd_addr = 0x1f0;
4107 probe_ent->port[0].altstatus_addr =
4108 probe_ent->port[0].ctl_addr = 0x3f6;
4109 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4110
4111 probe_ent2->port[0].cmd_addr = 0x170;
4112 probe_ent2->port[0].altstatus_addr =
4113 probe_ent2->port[0].ctl_addr = 0x376;
4114 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
4115
4116 ata_std_ports(&probe_ent->port[0]);
4117 ata_std_ports(&probe_ent2->port[0]);
4118
4119 *ppe2 = probe_ent2;
4120 return probe_ent;
4121 }
4122
4123 /**
4124 * ata_pci_init_one - Initialize/register PCI IDE host controller
4125 * @pdev: Controller to be initialized
4126 * @port_info: Information from low-level host driver
4127 * @n_ports: Number of ports attached to host controller
4128 *
4129 * This is a helper function which can be called from a driver's
4130 * xxx_init_one() probe function if the hardware uses traditional
4131 * IDE taskfile registers.
4132 *
4133 * This function calls pci_enable_device(), reserves its register
4134 * regions, sets the DMA mask, enables bus master mode, and calls
4135 * ata_device_add().
4136 *
4137 * LOCKING:
4138 * Inherited from PCI layer (may sleep).
4139 *
4140 * RETURNS:
4141 * Zero on success, negative errno-based value on error.
4142 *
4143 */
4144
4145 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4146 unsigned int n_ports)
4147 {
4148 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
4149 struct ata_port_info *port[2];
4150 u8 tmp8, mask;
4151 unsigned int legacy_mode = 0;
4152 int disable_dev_on_err = 1;
4153 int rc;
4154
4155 DPRINTK("ENTER\n");
4156
4157 port[0] = port_info[0];
4158 if (n_ports > 1)
4159 port[1] = port_info[1];
4160 else
4161 port[1] = port[0];
4162
4163 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4164 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4165 /* TODO: support transitioning to native mode? */
4166 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4167 mask = (1 << 2) | (1 << 0);
4168 if ((tmp8 & mask) != mask)
4169 legacy_mode = (1 << 3);
4170 }
4171
4172 /* FIXME... */
4173 if ((!legacy_mode) && (n_ports > 1)) {
4174 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
4175 return -EINVAL;
4176 }
4177
4178 rc = pci_enable_device(pdev);
4179 if (rc)
4180 return rc;
4181
4182 rc = pci_request_regions(pdev, DRV_NAME);
4183 if (rc) {
4184 disable_dev_on_err = 0;
4185 goto err_out;
4186 }
4187
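/* In legacy mode the two channels live at fixed ISA-style
 * addresses outside our PCI BARs, so claim each 8-byte command
 * block separately. If request_region() fails, probe the current
 * owner via ____request_resource(): a conflicting resource already
 * named "libata" means the range was claimed by libata itself and
 * the channel is still usable; any other owner means that channel
 * is skipped with a warning.
 */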
4188 if (legacy_mode) {
4189 if (!request_region(0x1f0, 8, "libata")) {
4190 struct resource *conflict, res;
4191 res.start = 0x1f0;
4192 res.end = 0x1f0 + 8 - 1;
4193 conflict = ____request_resource(&ioport_resource, &res);
4194 if (!strcmp(conflict->name, "libata"))
4195 legacy_mode |= (1 << 0);
4196 else {
4197 disable_dev_on_err = 0;
4198 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4199 }
4200 } else
4201 legacy_mode |= (1 << 0);
4202
4203 if (!request_region(0x170, 8, "libata")) {
4204 struct resource *conflict, res;
4205 res.start = 0x170;
4206 res.end = 0x170 + 8 - 1;
4207 conflict = ____request_resource(&ioport_resource, &res);
4208 if (!strcmp(conflict->name, "libata"))
4209 legacy_mode |= (1 << 1);
4210 else {
4211 disable_dev_on_err = 0;
4212 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4213 }
4214 } else
4215 legacy_mode |= (1 << 1);
4216 }
4217
4218 /* we have legacy mode, but all ports are unavailable */
4219 if (legacy_mode == (1 << 3)) {
4220 rc = -EBUSY;
4221 goto err_out_regions;
4222 }
4223
4224 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4225 if (rc)
4226 goto err_out_regions;
4227 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4228 if (rc)
4229 goto err_out_regions;
4230
4231 if (legacy_mode)
4232 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
4233 else
4234 probe_ent = ata_pci_init_native_mode(pdev, port);
4235 if (!probe_ent) {
4236 rc = -ENOMEM;
4237 goto err_out_regions;
4238 }
4239
4240 pci_set_master(pdev);
4241
4242 /* FIXME: check ata_device_add return */
4243 if (legacy_mode) {
4244 if (legacy_mode & (1 << 0))
4245 ata_device_add(probe_ent);
4246 if (legacy_mode & (1 << 1))
4247 ata_device_add(probe_ent2);
4248 } else
4249 ata_device_add(probe_ent);
4250
4251 kfree(probe_ent);
4252 kfree(probe_ent2);
4253
4254 return 0;
4255
4256 err_out_regions:
4257 if (legacy_mode & (1 << 0))
4258 release_region(0x1f0, 8);
4259 if (legacy_mode & (1 << 1))
4260 release_region(0x170, 8);
4261 pci_release_regions(pdev);
4262 err_out:
4263 if (disable_dev_on_err)
4264 pci_disable_device(pdev);
4265 return rc;
4266 }
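/*
 * Example (hypothetical; the foo_* names are illustrative only): for
 * hardware that this helper fully describes, a low-level driver's
 * probe routine can collapse to a single call:
 *
 *	static int __devinit foo_init_one(struct pci_dev *pdev,
 *					  const struct pci_device_id *ent)
 *	{
 *		static struct ata_port_info *port_info[1] =
 *			{ &foo_port_info };
 *
 *		return ata_pci_init_one(pdev, port_info, 1);
 *	}
 */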
4267
4268 /**
4269 * ata_pci_remove_one - PCI layer callback for device removal
4270 * @pdev: PCI device that was removed
4271 *
4272 * PCI layer indicates to libata via this hook that a
4273 * hot-unplug or module unload event has occurred.
4274 * Handle this by unregistering all objects associated
4275 * with this PCI device: free those objects, then
4276 * release PCI resources and disable the device.
4277 *
4278 * LOCKING:
4279 * Inherited from PCI layer (may sleep).
4280 */
4281
4282 void ata_pci_remove_one (struct pci_dev *pdev)
4283 {
4284 struct device *dev = pci_dev_to_dev(pdev);
4285 struct ata_host_set *host_set = dev_get_drvdata(dev);
4286 struct ata_port *ap;
4287 unsigned int i;
4288
4289 for (i = 0; i < host_set->n_ports; i++) {
4290 ap = host_set->ports[i];
4291
4292 scsi_remove_host(ap->host);
4293 }
4294
4295 free_irq(host_set->irq, host_set);
4296
4297 for (i = 0; i < host_set->n_ports; i++) {
4298 ap = host_set->ports[i];
4299
4300 ata_scsi_release(ap->host);
4301
4302 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4303 struct ata_ioports *ioaddr = &ap->ioaddr;
4304
4305 if (ioaddr->cmd_addr == 0x1f0)
4306 release_region(0x1f0, 8);
4307 else if (ioaddr->cmd_addr == 0x170)
4308 release_region(0x170, 8);
4309 }
4310
4311 scsi_host_put(ap->host);
4312 }
4313
4314 if (host_set->ops->host_stop)
4315 host_set->ops->host_stop(host_set);
4316
4317 kfree(host_set);
4318
4319 pci_release_regions(pdev);
4320 pci_disable_device(pdev);
4321 dev_set_drvdata(dev, NULL);
4322 }
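/*
 * Example (hypothetical foo driver, shown only to illustrate how the
 * init and remove helpers pair up): ata_pci_remove_one() can be wired
 * directly into a driver's struct pci_driver:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	};
 */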
4323
4324 /* move to PCI subsystem */
4325 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
4326 {
4327 unsigned long tmp = 0;
4328
4329 switch (bits->width) {
4330 case 1: {
4331 u8 tmp8 = 0;
4332 pci_read_config_byte(pdev, bits->reg, &tmp8);
4333 tmp = tmp8;
4334 break;
4335 }
4336 case 2: {
4337 u16 tmp16 = 0;
4338 pci_read_config_word(pdev, bits->reg, &tmp16);
4339 tmp = tmp16;
4340 break;
4341 }
4342 case 4: {
4343 u32 tmp32 = 0;
4344 pci_read_config_dword(pdev, bits->reg, &tmp32);
4345 tmp = tmp32;
4346 break;
4347 }
4348
4349 default:
4350 return -EINVAL;
4351 }
4352
4353 tmp &= bits->mask;
4354
4355 return (tmp == bits->val) ? 1 : 0;
4356 }
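/*
 * Example (register offsets 0x41/0x43 and the 0x80 enable bit are
 * made up for illustration; real values come from the chip
 * datasheet): a driver can describe a per-channel "enabled" bit and
 * test it before touching the port:
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41U, 1U, 0x80UL, 0x80UL },
 *		{ 0x43U, 1U, 0x80UL, 0x80UL },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->hard_port_no])) {
 *		ata_port_disable(ap);
 *		return;
 *	}
 */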
4357 #endif /* CONFIG_PCI */
4358
4359
4360 static int __init ata_init(void)
4361 {
4362 ata_wq = create_workqueue("ata");
4363 if (!ata_wq)
4364 return -ENOMEM;
4365
4366 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4367 return 0;
4368 }
4369
4370 static void __exit ata_exit(void)
4371 {
4372 destroy_workqueue(ata_wq);
4373 }
4374
4375 module_init(ata_init);
4376 module_exit(ata_exit);
4377
4378 /*
4379 * libata is essentially a library of internal helper functions for
4380 * low-level ATA host controller drivers. As such, the API/ABI is
4381 * likely to change as new drivers are added and updated.
4382 * Do not depend on ABI/API stability.
4383 */
4384
4385 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4386 EXPORT_SYMBOL_GPL(ata_std_ports);
4387 EXPORT_SYMBOL_GPL(ata_device_add);
4388 EXPORT_SYMBOL_GPL(ata_sg_init);
4389 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4390 EXPORT_SYMBOL_GPL(ata_qc_complete);
4391 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4392 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4393 EXPORT_SYMBOL_GPL(ata_tf_load);
4394 EXPORT_SYMBOL_GPL(ata_tf_read);
4395 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4396 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4397 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4398 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4399 EXPORT_SYMBOL_GPL(ata_check_status);
4400 EXPORT_SYMBOL_GPL(ata_altstatus);
4401 EXPORT_SYMBOL_GPL(ata_chk_err);
4402 EXPORT_SYMBOL_GPL(ata_exec_command);
4403 EXPORT_SYMBOL_GPL(ata_port_start);
4404 EXPORT_SYMBOL_GPL(ata_port_stop);
4405 EXPORT_SYMBOL_GPL(ata_host_stop);
4406 EXPORT_SYMBOL_GPL(ata_interrupt);
4407 EXPORT_SYMBOL_GPL(ata_qc_prep);
4408 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4409 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4410 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4411 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4412 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4413 EXPORT_SYMBOL_GPL(ata_port_probe);
4414 EXPORT_SYMBOL_GPL(sata_phy_reset);
4415 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4416 EXPORT_SYMBOL_GPL(ata_bus_reset);
4417 EXPORT_SYMBOL_GPL(ata_port_disable);
4418 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4419 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4420 EXPORT_SYMBOL_GPL(ata_scsi_error);
4421 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4422 EXPORT_SYMBOL_GPL(ata_scsi_release);
4423 EXPORT_SYMBOL_GPL(ata_host_intr);
4424 EXPORT_SYMBOL_GPL(ata_dev_classify);
4425 EXPORT_SYMBOL_GPL(ata_dev_id_string);
4426 EXPORT_SYMBOL_GPL(ata_dev_config);
4427 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4428
4429 #ifdef CONFIG_PCI
4430 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4431 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4432 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4433 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4434 #endif /* CONFIG_PCI */