drivers/ata/libata-sff.c
1 /*
2 * libata-sff.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/pci.h>
37 #include <linux/libata.h>
38
39 #include "libata.h"
40
41 /**
42 * ata_irq_on - Enable interrupts on a port.
43 * @ap: Port on which interrupts are enabled.
44 *
45 * Enable interrupts on a legacy IDE device using MMIO or PIO,
46 * wait for idle, clear any pending interrupts.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51 u8 ata_irq_on(struct ata_port *ap)
52 {
53 struct ata_ioports *ioaddr = &ap->ioaddr;
54 u8 tmp;
55
56 ap->ctl &= ~ATA_NIEN;
57 ap->last_ctl = ap->ctl;
58
59 iowrite8(ap->ctl, ioaddr->ctl_addr);
60 tmp = ata_wait_idle(ap);
61
62 ap->ops->irq_clear(ap);
63
64 return tmp;
65 }
66
67 u8 ata_dummy_irq_on (struct ata_port *ap) { return 0; }
68
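/*
 * Illustrative sketch, not part of libata-sff.c: ata_irq_on() and the
 * dummy variant above are meant to be hooked into a port's irq_on slot
 * (ata_bmdma_thaw() below invokes it through ap->ops->irq_on()), and
 * ata_irq_ack()/ata_dummy_irq_ack() likewise into irq_ack.  example_ops
 * is hypothetical and the field names assume the ata_port_operations
 * layout of this kernel era.
 */
#if 0	/* example only */
static const struct ata_port_operations example_ops = {
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,
	/* remaining entries elided */
};
#endif
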
69 /**
70 * ata_irq_ack - Acknowledge a device interrupt.
71 * @ap: Port on which interrupts are enabled.
72 *
73 * Wait up to 10 ms for legacy IDE device to become idle (BUSY
74 * or BUSY+DRQ clear). Obtain dma status and port status from
75 * device. Clear the interrupt. Return port status.
76 *
77 * LOCKING:
78 */
79
80 u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
81 {
82 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
83 u8 host_stat, post_stat, status;
84
85 status = ata_busy_wait(ap, bits, 1000);
86 if (status & bits)
87 if (ata_msg_err(ap))
88 printk(KERN_ERR "abnormal status 0x%X\n", status);
89
90 /* get controller status; clear intr, err bits */
91 host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
92 iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
93 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
94
95 post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
96
97 if (ata_msg_intr(ap))
98 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
99 __FUNCTION__,
100 host_stat, post_stat, status);
101
102 return status;
103 }
104
105 u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; }
106
107 /**
108 * ata_tf_load - send taskfile registers to host controller
109 * @ap: Port to which output is sent
110 * @tf: ATA taskfile register set
111 *
112 * Outputs ATA taskfile to standard ATA host controller.
113 *
114 * LOCKING:
115 * Inherited from caller.
116 */
117
118 void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
119 {
120 struct ata_ioports *ioaddr = &ap->ioaddr;
121 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
122
123 if (tf->ctl != ap->last_ctl) {
124 iowrite8(tf->ctl, ioaddr->ctl_addr);
125 ap->last_ctl = tf->ctl;
126 ata_wait_idle(ap);
127 }
128
129 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
130 iowrite8(tf->hob_feature, ioaddr->feature_addr);
131 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
132 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
133 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
134 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
135 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
136 tf->hob_feature,
137 tf->hob_nsect,
138 tf->hob_lbal,
139 tf->hob_lbam,
140 tf->hob_lbah);
141 }
142
143 if (is_addr) {
144 iowrite8(tf->feature, ioaddr->feature_addr);
145 iowrite8(tf->nsect, ioaddr->nsect_addr);
146 iowrite8(tf->lbal, ioaddr->lbal_addr);
147 iowrite8(tf->lbam, ioaddr->lbam_addr);
148 iowrite8(tf->lbah, ioaddr->lbah_addr);
149 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
150 tf->feature,
151 tf->nsect,
152 tf->lbal,
153 tf->lbam,
154 tf->lbah);
155 }
156
157 if (tf->flags & ATA_TFLAG_DEVICE) {
158 iowrite8(tf->device, ioaddr->device_addr);
159 VPRINTK("device 0x%X\n", tf->device);
160 }
161
162 ata_wait_idle(ap);
163 }
164
165 /**
166 * ata_exec_command - issue ATA command to host controller
167 * @ap: port to which command is being issued
168 * @tf: ATA taskfile register set
169 *
170 * Issues ATA command, with proper synchronization with interrupt
171 * handler / other threads.
172 *
173 * LOCKING:
174 * spin_lock_irqsave(host lock)
175 */
176 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
177 {
178 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
179
180 iowrite8(tf->command, ap->ioaddr.command_addr);
181 ata_pause(ap);
182 }
183
184 /**
185 * ata_tf_read - input device's ATA taskfile shadow registers
186 * @ap: Port from which input is read
187 * @tf: ATA taskfile register set for storing input
188 *
189 * Reads ATA taskfile registers for currently-selected device
190 * into @tf.
191 *
192 * LOCKING:
193 * Inherited from caller.
194 */
195 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
196 {
197 struct ata_ioports *ioaddr = &ap->ioaddr;
198
199 tf->command = ata_check_status(ap);
200 tf->feature = ioread8(ioaddr->error_addr);
201 tf->nsect = ioread8(ioaddr->nsect_addr);
202 tf->lbal = ioread8(ioaddr->lbal_addr);
203 tf->lbam = ioread8(ioaddr->lbam_addr);
204 tf->lbah = ioread8(ioaddr->lbah_addr);
205 tf->device = ioread8(ioaddr->device_addr);
206
207 if (tf->flags & ATA_TFLAG_LBA48) {
208 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
209 tf->hob_feature = ioread8(ioaddr->error_addr);
210 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
211 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
212 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
213 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
214 }
215 }
216
217 /**
218 * ata_check_status - Read device status reg & clear interrupt
219 * @ap: port where the device is
220 *
221 * Reads ATA taskfile status register for currently-selected device
222 * and return its value. This also clears pending interrupts
223 * from this device
224 *
225 * LOCKING:
226 * Inherited from caller.
227 */
228 u8 ata_check_status(struct ata_port *ap)
229 {
230 return ioread8(ap->ioaddr.status_addr);
231 }
232
233 /**
234 * ata_altstatus - Read device alternate status reg
235 * @ap: port where the device is
236 *
237 * Reads ATA taskfile alternate status register for
238 * currently-selected device and return its value.
239 *
240 * Note: may NOT be used as the check_altstatus() entry in
241 * ata_port_operations.
242 *
243 * LOCKING:
244 * Inherited from caller.
245 */
246 u8 ata_altstatus(struct ata_port *ap)
247 {
248 if (ap->ops->check_altstatus)
249 return ap->ops->check_altstatus(ap);
250
251 return ioread8(ap->ioaddr.altstatus_addr);
252 }
253
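/*
 * Illustrative sketch, not part of libata-sff.c: a hypothetical low-level
 * driver could plug the taskfile helpers above directly into its
 * ata_port_operations.  example_sff_ops is made up for illustration and
 * the field names assume the ata_port_operations layout of this kernel
 * era.  Note that ata_altstatus() is deliberately omitted -- per its
 * kernel-doc it may NOT be used as the check_altstatus() entry.
 */
#if 0	/* example only */
static const struct ata_port_operations example_sff_ops = {
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.exec_command	= ata_exec_command,
	.check_status	= ata_check_status,
	.dev_select	= ata_std_dev_select,
	/* remaining entries (data_xfer, irq handling, ...) elided */
};
#endif
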
254 /**
255 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
256 * @qc: Info associated with this ATA transaction.
257 *
258 * LOCKING:
259 * spin_lock_irqsave(host lock)
260 */
261 void ata_bmdma_setup(struct ata_queued_cmd *qc)
262 {
263 struct ata_port *ap = qc->ap;
264 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
265 u8 dmactl;
266
267 /* load PRD table addr. */
268 mb(); /* make sure PRD table writes are visible to controller */
269 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
270
271 /* specify data direction, triple-check start bit is clear */
272 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
273 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
274 if (!rw)
275 dmactl |= ATA_DMA_WR;
276 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
277
278 /* issue r/w command */
279 ap->ops->exec_command(ap, &qc->tf);
280 }
281
282 /**
283 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
284 * @qc: Info associated with this ATA transaction.
285 *
286 * LOCKING:
287 * spin_lock_irqsave(host lock)
288 */
289 void ata_bmdma_start (struct ata_queued_cmd *qc)
290 {
291 struct ata_port *ap = qc->ap;
292 u8 dmactl;
293
294 /* start host DMA transaction */
295 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
296 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
297
298 /* Strictly, one may wish to issue a readb() here, to
299 * flush the mmio write. However, control also passes
300 * to the hardware at this point, and it will interrupt
301 * us when we are to resume control. So, in effect,
302 * we don't care when the mmio write flushes.
303 * Further, a read of the DMA status register _immediately_
304 * following the write may not be what certain flaky hardware
305 * is expecting, so I think it is best to not add a readb()
306 * without first checking all the MMIO ATA cards/mobos.
307 * Or maybe I'm just being paranoid.
308 */
309 }
310
311 /**
312 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
313 * @ap: Port associated with this ATA transaction.
314 *
315 * Clear interrupt and error flags in DMA status register.
316 *
317 * May be used as the irq_clear() entry in ata_port_operations.
318 *
319 * LOCKING:
320 * spin_lock_irqsave(host lock)
321 */
322 void ata_bmdma_irq_clear(struct ata_port *ap)
323 {
324 void __iomem *mmio = ap->ioaddr.bmdma_addr;
325
326 if (!mmio)
327 return;
328
329 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
330 }
331
332 /**
333 * ata_bmdma_status - Read PCI IDE BMDMA status
334 * @ap: Port associated with this ATA transaction.
335 *
336 * Read and return BMDMA status register.
337 *
338 * May be used as the bmdma_status() entry in ata_port_operations.
339 *
340 * LOCKING:
341 * spin_lock_irqsave(host lock)
342 */
343 u8 ata_bmdma_status(struct ata_port *ap)
344 {
345 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
346 }
347
348 /**
349 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
350 * @qc: Command we are ending DMA for
351 *
352 * Clears the ATA_DMA_START flag in the dma control register
353 *
354 * May be used as the bmdma_stop() entry in ata_port_operations.
355 *
356 * LOCKING:
357 * spin_lock_irqsave(host lock)
358 */
359 void ata_bmdma_stop(struct ata_queued_cmd *qc)
360 {
361 struct ata_port *ap = qc->ap;
362 void __iomem *mmio = ap->ioaddr.bmdma_addr;
363
364 /* clear start/stop bit */
365 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
366 mmio + ATA_DMA_CMD);
367
368 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
369 ata_altstatus(ap); /* dummy read */
370 }
371
372 /**
373 * ata_bmdma_freeze - Freeze BMDMA controller port
374 * @ap: port to freeze
375 *
376 * Freeze BMDMA controller port.
377 *
378 * LOCKING:
379 * Inherited from caller.
380 */
381 void ata_bmdma_freeze(struct ata_port *ap)
382 {
383 struct ata_ioports *ioaddr = &ap->ioaddr;
384
385 ap->ctl |= ATA_NIEN;
386 ap->last_ctl = ap->ctl;
387
388 iowrite8(ap->ctl, ioaddr->ctl_addr);
389
390 /* Under certain circumstances, some controllers raise IRQ on
391 * ATA_NIEN manipulation. Also, many controllers fail to mask
392 * previously pending IRQ on ATA_NIEN assertion. Clear it.
393 */
394 ata_chk_status(ap);
395
396 ap->ops->irq_clear(ap);
397 }
398
399 /**
400 * ata_bmdma_thaw - Thaw BMDMA controller port
401 * @ap: port to thaw
402 *
403 * Thaw BMDMA controller port.
404 *
405 * LOCKING:
406 * Inherited from caller.
407 */
408 void ata_bmdma_thaw(struct ata_port *ap)
409 {
410 /* clear & re-enable interrupts */
411 ata_chk_status(ap);
412 ap->ops->irq_clear(ap);
413 ap->ops->irq_on(ap);
414 }
415
416 /**
417 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
418 * @ap: port to handle error for
419 * @prereset: prereset method (can be NULL)
420 * @softreset: softreset method (can be NULL)
421 * @hardreset: hardreset method (can be NULL)
422 * @postreset: postreset method (can be NULL)
423 *
424 * Handle error for ATA BMDMA controller. It can handle both
425 * PATA and SATA controllers. Many controllers should be able to
426 * use this EH as-is or with some added handling before and
427 * after.
428 *
429 * This function is intended to be used for constructing
430 * ->error_handler callback by low level drivers.
431 *
432 * LOCKING:
433 * Kernel thread context (may sleep)
434 */
435 void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
436 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
437 ata_postreset_fn_t postreset)
438 {
439 struct ata_queued_cmd *qc;
440 unsigned long flags;
441 int thaw = 0;
442
443 qc = __ata_qc_from_tag(ap, ap->active_tag);
444 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
445 qc = NULL;
446
447 /* reset PIO HSM and stop DMA engine */
448 spin_lock_irqsave(ap->lock, flags);
449
450 ap->hsm_task_state = HSM_ST_IDLE;
451
452 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
453 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
454 u8 host_stat;
455
456 host_stat = ap->ops->bmdma_status(ap);
457
458 /* BMDMA controllers indicate host bus error by
459 * setting DMA_ERR bit and timing out. As it wasn't
460 * really a timeout event, adjust error mask and
461 * cancel frozen state.
462 */
463 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
464 qc->err_mask = AC_ERR_HOST_BUS;
465 thaw = 1;
466 }
467
468 ap->ops->bmdma_stop(qc);
469 }
470
471 ata_altstatus(ap);
472 ata_chk_status(ap);
473 ap->ops->irq_clear(ap);
474
475 spin_unlock_irqrestore(ap->lock, flags);
476
477 if (thaw)
478 ata_eh_thaw_port(ap);
479
480 /* PIO and DMA engines have been stopped, perform recovery */
481 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
482 }
483
484 /**
485 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
486 * @ap: port to handle error for
487 *
488 * Stock error handler for BMDMA controller.
489 *
490 * LOCKING:
491 * Kernel thread context (may sleep)
492 */
493 void ata_bmdma_error_handler(struct ata_port *ap)
494 {
495 ata_reset_fn_t hardreset;
496
497 hardreset = NULL;
498 if (sata_scr_valid(ap))
499 hardreset = sata_std_hardreset;
500
501 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
502 ata_std_postreset);
503 }
504
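/*
 * Illustrative sketch, not part of libata-sff.c: the stock BMDMA and EH
 * helpers above are intended to be wired together as ata_port_operations
 * entries by a low-level driver.  example_bmdma_ops is hypothetical and
 * the field names assume the ata_port_operations layout of this kernel
 * era.
 */
#if 0	/* example only */
static const struct ata_port_operations example_bmdma_ops = {
	/* BMDMA engine control */
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.irq_clear	= ata_bmdma_irq_clear,

	/* new-style error handling built on ata_bmdma_drive_eh() */
	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= ata_bmdma_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
};
#endif
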
505 /**
506 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
507 * BMDMA controller
508 * @qc: internal command to clean up
509 *
510 * LOCKING:
511 * Kernel thread context (may sleep)
512 */
513 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
514 {
515 if (qc->ap->ioaddr.bmdma_addr)
516 ata_bmdma_stop(qc);
517 }
518
519 #ifdef CONFIG_PCI
520
521 static int ata_resources_present(struct pci_dev *pdev, int port)
522 {
523 int i;
524
525 /* Check the PCI resources for this channel are enabled */
526 port = port * 2;
527 for (i = 0; i < 2; i ++) {
528 if (pci_resource_start(pdev, port + i) == 0 ||
529 pci_resource_len(pdev, port + i) == 0)
530 return 0;
531 }
532 return 1;
533 }
534
535 /**
536 * ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
537 * @host: target ATA host
538 *
539 * Acquire PCI BMDMA resources and initialize @host accordingly.
540 *
541 * LOCKING:
542 * Inherited from calling layer (may sleep).
543 *
544 * RETURNS:
545 * 0 on success, -errno otherwise.
546 */
547 static int ata_pci_init_bmdma(struct ata_host *host)
548 {
549 struct device *gdev = host->dev;
550 struct pci_dev *pdev = to_pci_dev(gdev);
551 int i, rc;
552
553 /* TODO: If we get no DMA mask we should fall back to PIO */
554 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
555 if (rc)
556 return rc;
557 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
558 if (rc)
559 return rc;
560
561 /* request and iomap DMA region */
562 rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
563 if (rc) {
564 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
565 return -ENOMEM;
566 }
567 host->iomap = pcim_iomap_table(pdev);
568
569 for (i = 0; i < 2; i++) {
570 struct ata_port *ap = host->ports[i];
571 void __iomem *bmdma = host->iomap[4] + 8 * i;
572
573 if (ata_port_is_dummy(ap))
574 continue;
575
576 ap->ioaddr.bmdma_addr = bmdma;
577 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
578 (ioread8(bmdma + 2) & 0x80))
579 host->flags |= ATA_HOST_SIMPLEX;
580 }
581
582 return 0;
583 }
584
585 /**
586 * ata_pci_init_native_host - acquire native ATA resources and init host
587 * @host: target ATA host
588 * @port_mask: ports to consider
589 *
590 * Acquire native PCI ATA resources for @host and initialize
591 * @host accordingly.
592 *
593 * LOCKING:
594 * Inherited from calling layer (may sleep).
595 *
596 * RETURNS:
597 * 0 on success, -errno otherwise.
598 */
599 int ata_pci_init_native_host(struct ata_host *host, unsigned int port_mask)
600 {
601 struct device *gdev = host->dev;
602 struct pci_dev *pdev = to_pci_dev(gdev);
603 int i, rc;
604
605 /* Discard disabled ports. Some controllers show their unused
606 * channels this way. Disabled ports are made dummy.
607 */
608 for (i = 0; i < 2; i++) {
609 if ((port_mask & (1 << i)) && !ata_resources_present(pdev, i)) {
610 host->ports[i]->ops = &ata_dummy_port_ops;
611 port_mask &= ~(1 << i);
612 }
613 }
614
615 if (!port_mask) {
616 dev_printk(KERN_ERR, gdev, "no available port\n");
617 return -ENODEV;
618 }
619
620 /* request, iomap BARs and init port addresses accordingly */
621 for (i = 0; i < 2; i++) {
622 struct ata_port *ap = host->ports[i];
623 int base = i * 2;
624 void __iomem * const *iomap;
625
626 if (!(port_mask & (1 << i)))
627 continue;
628
629 rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
630 if (rc) {
631 dev_printk(KERN_ERR, gdev, "failed to request/iomap "
632 "BARs for port %d (errno=%d)\n", i, rc);
633 if (rc == -EBUSY)
634 pcim_pin_device(pdev);
635 return rc;
636 }
637 host->iomap = iomap = pcim_iomap_table(pdev);
638
639 ap->ioaddr.cmd_addr = iomap[base];
640 ap->ioaddr.altstatus_addr =
641 ap->ioaddr.ctl_addr = (void __iomem *)
642 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
643 ata_std_ports(&ap->ioaddr);
644 }
645
646 return 0;
647 }
648
649 /**
650 * ata_pci_prepare_native_host - helper to prepare native PCI ATA host
651 * @pdev: target PCI device
652 * @ppi: array of port_info
653 * @n_ports: number of ports to allocate
654 * @r_host: out argument for the initialized ATA host
655 *
656 * Helper to allocate ATA host for @pdev, acquire all native PCI
657 * resources and initialize it accordingly in one go.
658 *
659 * LOCKING:
660 * Inherited from calling layer (may sleep).
661 *
662 * RETURNS:
663 * 0 on success, -errno otherwise.
664 */
665 int ata_pci_prepare_native_host(struct pci_dev *pdev,
666 const struct ata_port_info * const * ppi,
667 int n_ports, struct ata_host **r_host)
668 {
669 struct ata_host *host;
670 unsigned int port_mask;
671 int rc;
672
673 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
674 return -ENOMEM;
675
676 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
677 if (!host) {
678 dev_printk(KERN_ERR, &pdev->dev,
679 "failed to allocate ATA host\n");
680 rc = -ENOMEM;
681 goto err_out;
682 }
683
684 port_mask = ATA_PORT_PRIMARY;
685 if (n_ports > 1)
686 port_mask |= ATA_PORT_SECONDARY;
687
688 rc = ata_pci_init_native_host(host, port_mask);
689 if (rc)
690 goto err_out;
691
692 /* init DMA related stuff */
693 rc = ata_pci_init_bmdma(host);
694 if (rc)
695 goto err_bmdma;
696
697 devres_remove_group(&pdev->dev, NULL);
698 *r_host = host;
699 return 0;
700
701 err_bmdma:
702 /* This is necessary because PCI and iomap resources are
703 * merged and releasing the top group won't release the
704 * acquired resources if some of those have been acquired
705 * before entering this function.
706 */
707 pcim_iounmap_regions(pdev, 0xf);
708 err_out:
709 devres_release_group(&pdev->dev, NULL);
710 return rc;
711 }
712
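/*
 * Illustrative sketch, not part of libata-sff.c: a hypothetical PCI probe
 * routine built on ata_pci_prepare_native_host().  example_port_info,
 * example_sht and example_interrupt are driver-provided objects that are
 * not defined here; the start/irq/register sequence mirrors the one used
 * by ata_pci_init_one() below.
 */
#if 0	/* example only */
static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] =
		{ &example_port_info, &example_port_info };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host);
	if (rc)
		return rc;

	pci_set_master(pdev);

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(&pdev->dev, pdev->irq, example_interrupt,
			      IRQF_SHARED, DRV_NAME, host);
	if (rc)
		return rc;

	return ata_host_register(host, &example_sht);
}
#endif
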
713 struct ata_legacy_devres {
714 unsigned int mask;
715 unsigned long cmd_port[2];
716 void __iomem * cmd_addr[2];
717 void __iomem * ctl_addr[2];
718 unsigned int irq[2];
719 void * irq_dev_id[2];
720 };
721
722 static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
723 {
724 int i;
725
726 for (i = 0; i < 2; i++) {
727 if (!legacy_dr->irq[i])
728 continue;
729
730 free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
731 legacy_dr->irq[i] = 0;
732 legacy_dr->irq_dev_id[i] = NULL;
733 }
734 }
735
736 static void ata_legacy_release(struct device *gdev, void *res)
737 {
738 struct ata_legacy_devres *this = res;
739 int i;
740
741 ata_legacy_free_irqs(this);
742
743 for (i = 0; i < 2; i++) {
744 if (this->cmd_addr[i])
745 ioport_unmap(this->cmd_addr[i]);
746 if (this->ctl_addr[i])
747 ioport_unmap(this->ctl_addr[i]);
748 if (this->cmd_port[i])
749 release_region(this->cmd_port[i], 8);
750 }
751 }
752
753 static int ata_init_legacy_port(struct ata_port *ap,
754 struct ata_legacy_devres *legacy_dr)
755 {
756 struct ata_host *host = ap->host;
757 int port_no = ap->port_no;
758 unsigned long cmd_port, ctl_port;
759
760 if (port_no == 0) {
761 cmd_port = ATA_PRIMARY_CMD;
762 ctl_port = ATA_PRIMARY_CTL;
763 } else {
764 cmd_port = ATA_SECONDARY_CMD;
765 ctl_port = ATA_SECONDARY_CTL;
766 }
767
768 /* request cmd_port */
769 if (request_region(cmd_port, 8, "libata"))
770 legacy_dr->cmd_port[port_no] = cmd_port;
771 else {
772 dev_printk(KERN_WARNING, host->dev,
773 "0x%0lX IDE port busy\n", cmd_port);
774 return -EBUSY;
775 }
776
777 /* iomap cmd and ctl ports */
778 legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
779 legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
780 if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no])
781 return -ENOMEM;
782
783 /* init IO addresses */
784 ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
785 ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
786 ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
787 ata_std_ports(&ap->ioaddr);
788
789 return 0;
790 }
791
792 /**
793 * ata_init_legacy_host - acquire legacy ATA resources and init ATA host
794 * @host: target ATA host
795 * @legacy_mask: out parameter, mask indicating which ports are in legacy mode
796 * @was_busy: out parameter, indicates whether any port was busy
797 *
798 * Acquire legacy ATA resources for ports.
799 *
800 * LOCKING:
801 * Inherited from calling layer (may sleep).
802 *
803 * RETURNS:
804 * 0 on success, -errno otherwise.
805 */
806 static int ata_init_legacy_host(struct ata_host *host,
807 unsigned int *legacy_mask, int *was_busy)
808 {
809 struct device *gdev = host->dev;
810 struct ata_legacy_devres *legacy_dr;
811 int i, rc;
812
813 if (!devres_open_group(gdev, NULL, GFP_KERNEL))
814 return -ENOMEM;
815
816 rc = -ENOMEM;
817 legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
818 GFP_KERNEL);
819 if (!legacy_dr)
820 goto err_out;
821 devres_add(gdev, legacy_dr);
822
823 for (i = 0; i < 2; i++) {
824 *legacy_mask &= ~(1 << i);
825 rc = ata_init_legacy_port(host->ports[i], legacy_dr);
826 if (rc == 0)
827 legacy_dr->mask |= 1 << i;
828 else if (rc == -EBUSY)
829 (*was_busy)++;
830 }
831
832 if (!legacy_dr->mask)
833 return -EBUSY;
834
835 for (i = 0; i < 2; i++)
836 if (!(legacy_dr->mask & (1 << i)))
837 host->ports[i]->ops = &ata_dummy_port_ops;
838
839 *legacy_mask |= legacy_dr->mask;
840
841 devres_remove_group(gdev, NULL);
842 return 0;
843
844 err_out:
845 devres_release_group(gdev, NULL);
846 return rc;
847 }
848
849 /**
850 * ata_request_legacy_irqs - request legacy ATA IRQs
851 * @host: target ATA host
852 * @handler: array of IRQ handlers
853 * @irq_flags: array of IRQ flags
854 * @dev_id: array of IRQ dev_ids
855 *
856 * Request legacy IRQs for non-dummy legacy ports in @host. All
857 * IRQ parameters are passed as array to allow ports to have
858 * separate IRQ handlers.
859 *
860 * LOCKING:
861 * Inherited from calling layer (may sleep).
862 *
863 * RETURNS:
864 * 0 on success, -errno otherwise.
865 */
866 static int ata_request_legacy_irqs(struct ata_host *host,
867 irq_handler_t const *handler,
868 const unsigned int *irq_flags,
869 void * const *dev_id)
870 {
871 struct device *gdev = host->dev;
872 struct ata_legacy_devres *legacy_dr;
873 int i, rc;
874
875 legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
876 BUG_ON(!legacy_dr);
877
878 for (i = 0; i < 2; i++) {
879 unsigned int irq;
880
881 /* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
882 if (i == 0)
883 irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
884 else
885 irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
886
887 if (!(legacy_dr->mask & (1 << i)))
888 continue;
889
890 if (!handler[i]) {
891 dev_printk(KERN_ERR, gdev,
892 "NULL handler specified for port %d\n", i);
893 rc = -EINVAL;
894 goto err_out;
895 }
896
897 rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
898 dev_id[i]);
899 if (rc) {
900 dev_printk(KERN_ERR, gdev,
901 "irq %u request failed (errno=%d)\n", irq, rc);
902 goto err_out;
903 }
904
905 /* record irq allocation in legacy_dr */
906 legacy_dr->irq[i] = irq;
907 legacy_dr->irq_dev_id[i] = dev_id[i];
908
909 /* only used to print info */
910 if (i == 0)
911 host->irq = irq;
912 else
913 host->irq2 = irq;
914 }
915
916 return 0;
917
918 err_out:
919 ata_legacy_free_irqs(legacy_dr);
920 return rc;
921 }
922
923 /**
924 * ata_pci_init_one - Initialize/register PCI IDE host controller
925 * @pdev: Controller to be initialized
926 * @port_info: Information from low-level host driver
927 * @n_ports: Number of ports attached to host controller
928 *
929 * This is a helper function which can be called from a driver's
930 * xxx_init_one() probe function if the hardware uses traditional
931 * IDE taskfile registers.
932 *
933 * This function calls pci_enable_device(), reserves its register
934 * regions, sets the dma mask, enables bus master mode, and calls
935 * ata_device_add()
936 *
937 * ASSUMPTION:
938 * Nobody makes a single channel controller that appears solely as
939 * the secondary legacy port on PCI.
940 *
941 * LOCKING:
942 * Inherited from PCI layer (may sleep).
943 *
944 * RETURNS:
945 * Zero on success, negative errno-based value on error.
946 */
947
948 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
949 unsigned int n_ports)
950 {
951 struct device *dev = &pdev->dev;
952 struct ata_host *host = NULL;
953 const struct ata_port_info *port[2];
954 u8 mask;
955 unsigned int legacy_mode = 0;
956 int rc;
957
958 DPRINTK("ENTER\n");
959
960 if (!devres_open_group(dev, NULL, GFP_KERNEL))
961 return -ENOMEM;
962
963 BUG_ON(n_ports < 1 || n_ports > 2);
964
965 port[0] = port_info[0];
966 if (n_ports > 1)
967 port[1] = port_info[1];
968 else
969 port[1] = port[0];
970
971 /* FIXME: Really, for ATA this isn't safe because the device may be
972 multi-purpose and we want to leave it alone if it was already
973 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
974
975 Checking dev->is_enabled is insufficient as this is not set at
976 boot for the primary video, which is BIOS enabled.
977 */
978
979 rc = pcim_enable_device(pdev);
980 if (rc)
981 goto err_out;
982
983 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
984 u8 tmp8;
985
986 /* TODO: What if one channel is in native mode ... */
987 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
988 mask = (1 << 2) | (1 << 0);
989 if ((tmp8 & mask) != mask)
990 legacy_mode = (1 << 3);
991 #if defined(CONFIG_NO_ATA_LEGACY)
992 /* Some platforms with PCI limits cannot address compat
993 port space. In that case we punt if their firmware has
994 left a device in compatibility mode */
995 if (legacy_mode) {
996 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
997 rc = -EOPNOTSUPP;
998 goto err_out;
999 }
1000 #endif
1001 }
1002
1003 /* alloc and init host */
1004 host = ata_host_alloc_pinfo(dev, port, 2);
1005 if (!host) {
1006 dev_printk(KERN_ERR, &pdev->dev,
1007 "failed to allocate ATA host\n");
1008 rc = -ENOMEM;
1009 goto err_out;
1010 }
1011
1012 if (!legacy_mode) {
1013 unsigned int port_mask;
1014
1015 port_mask = ATA_PORT_PRIMARY;
1016 if (n_ports > 1)
1017 port_mask |= ATA_PORT_SECONDARY;
1018
1019 rc = ata_pci_init_native_host(host, port_mask);
1020 if (rc)
1021 goto err_out;
1022 } else {
1023 int was_busy = 0;
1024
1025 rc = ata_init_legacy_host(host, &legacy_mode, &was_busy);
1026 if (was_busy)
1027 pcim_pin_device(pdev);
1028 if (rc)
1029 goto err_out;
1030
1031 /* request respective PCI regions, may fail */
1032 rc = pci_request_region(pdev, 1, DRV_NAME);
1033 rc = pci_request_region(pdev, 3, DRV_NAME);
1034 }
1035
1036 /* init BMDMA, may fail */
1037 ata_pci_init_bmdma(host);
1038 pci_set_master(pdev);
1039
1040 /* start host and request IRQ */
1041 rc = ata_host_start(host);
1042 if (rc)
1043 goto err_out;
1044
1045 if (!legacy_mode)
1046 rc = devm_request_irq(dev, pdev->irq,
1047 port_info[0]->port_ops->irq_handler,
1048 IRQF_SHARED, DRV_NAME, host);
1049 else {
1050 irq_handler_t handler[2] = { host->ops->irq_handler,
1051 host->ops->irq_handler };
1052 unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
1053 void *dev_id[2] = { host, host };
1054
1055 rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
1056 }
1057 if (rc)
1058 goto err_out;
1059
1060 /* register */
1061 rc = ata_host_register(host, port_info[0]->sht);
1062 if (rc)
1063 goto err_out;
1064
1065 devres_remove_group(dev, NULL);
1066 return 0;
1067
1068 err_out:
1069 devres_release_group(dev, NULL);
1070 return rc;
1071 }
1072
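/*
 * Illustrative sketch, not part of libata-sff.c: a minimal xxx_init_one()
 * built on the helper above.  example_port_info is a hypothetical
 * driver-provided ata_port_info (sht, flags, timing masks, port_ops) and
 * is not defined here.
 */
#if 0	/* example only */
static int example_pci_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct ata_port_info *port_info[] = { &example_port_info };

	return ata_pci_init_one(pdev, port_info, 1);
}
#endif
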
1073 /**
1074 * ata_pci_clear_simplex - attempt to kick device out of simplex
1075 * @pdev: PCI device
1076 *
1077 * Some PCI ATA devices report simplex mode but in fact can be told to
1078 * enter non simplex mode. This implements the necessary logic to
1079 * perform the task on such devices. Calling it on other devices will
1080 * have -undefined- behaviour.
1081 */
1082
1083 int ata_pci_clear_simplex(struct pci_dev *pdev)
1084 {
1085 unsigned long bmdma = pci_resource_start(pdev, 4);
1086 u8 simplex;
1087
1088 if (bmdma == 0)
1089 return -ENOENT;
1090
1091 simplex = inb(bmdma + 0x02);
1092 outb(simplex & 0x60, bmdma + 0x02);
1093 simplex = inb(bmdma + 0x02);
1094 if (simplex & 0x80)
1095 return -EOPNOTSUPP;
1096 return 0;
1097 }
1098
1099 unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
1100 {
1101 /* Filter out DMA modes if the device has been configured by
1102 the BIOS as PIO only */
1103
1104 if (adev->ap->ioaddr.bmdma_addr == 0)
1105 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1106 return xfer_mask;
1107 }
1108
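/*
 * Illustrative sketch, not part of libata-sff.c: drivers that rely on the
 * BIOS-programmed BMDMA state typically hook ata_pci_default_filter()
 * into their mode_filter slot so DMA modes are dropped when no BMDMA
 * resource was assigned.  example_pata_ops is hypothetical and the field
 * name assumes the ata_port_operations layout of this kernel era.
 */
#if 0	/* example only */
static const struct ata_port_operations example_pata_ops = {
	.mode_filter	= ata_pci_default_filter,
	/* remaining entries elided */
};
#endif
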
1109 #endif /* CONFIG_PCI */
1110