/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat <alan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
25 #include <linux/types.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include <linux/delay.h>
29 #include <linux/hdreg.h>
30 #include <linux/ide.h>
31 #include <linux/init.h>
#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4

#define SCC_PATA_NAME "scc IDE"

/* TDVHSEL register: per-device timing-value select bits */
#define TDVHSEL_MASTER 0x00000001
#define TDVHSEL_SLAVE 0x00000004

#define MODE_JCUSFEN 0x00000080

/* CCKCTRL register: clock/reset control bits */
#define CCKCTRL_ATARESET 0x00040000
#define CCKCTRL_BUFCNT 0x00020000
#define CCKCTRL_CRST 0x00010000
#define CCKCTRL_OCLKEN 0x00000100
#define CCKCTRL_ATACLKOEN 0x00000002  /* set => ATA clock 133MHz, clear => 100MHz */
#define CCKCTRL_LCLKEN 0x00000001

#define QCHCD_IOS_SS 0x00000001

#define QCHSD_STPDIAG 0x00020000

/* INTMASK/INTSTS registers: interrupt mask and status bits */
#define INTMASK_MSK 0xD1000012
#define INTSTS_SERROR 0x80000000
#define INTSTS_PRERR 0x40000000
#define INTSTS_RERR 0x10000000
#define INTSTS_ICERR 0x01000000
#define INTSTS_BMSINT 0x00000010
#define INTSTS_BMHE 0x00000008
#define INTSTS_IOIRQS 0x00000004
#define INTSTS_INTRQ 0x00000002
#define INTSTS_ACTEINT 0x00000001

#define ECMODE_VALUE 0x01
/* Per-port bookkeeping: remapped CTRL/BMID base addresses and the hwif. */
static struct scc_ports {
	unsigned long ctl, dma;
	ide_hwif_t *hwif;  /* for removing port from system */
} scc_ports[MAX_HWIFS];
/* PIO transfer mode table — indexed [clock: 0=100MHz/1=133MHz][pio mode] */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}    /* 133MHz */
};

static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},   /* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}    /* 133MHz */
};

static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},   /* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}    /* 133MHz */
};

/* DMA transfer mode table — indexed [clock][speed - XFER_UDMA_0] */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},   /* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}    /* 133MHz */
};

static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},   /* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},   /* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}    /* 133MHz */
};

static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},   /* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}    /* 133MHz */
};
123 static u8
scc_ide_inb(unsigned long port
)
125 u32 data
= in_be32((void*)port
);
129 static void scc_exec_command(ide_hwif_t
*hwif
, u8 cmd
)
131 out_be32((void *)hwif
->io_ports
.command_addr
, cmd
);
133 in_be32((void *)(hwif
->dma_base
+ 0x01c));
137 static u8
scc_read_sff_dma_status(ide_hwif_t
*hwif
)
139 return (u8
)in_be32((void *)(hwif
->dma_base
+ 4));
142 static void scc_ide_insw(unsigned long port
, void *addr
, u32 count
)
144 u16
*ptr
= (u16
*)addr
;
146 *ptr
++ = le16_to_cpu(in_be32((void*)port
));
150 static void scc_ide_insl(unsigned long port
, void *addr
, u32 count
)
152 u16
*ptr
= (u16
*)addr
;
154 *ptr
++ = le16_to_cpu(in_be32((void*)port
));
155 *ptr
++ = le16_to_cpu(in_be32((void*)port
));
159 static void scc_ide_outb(u8 addr
, unsigned long port
)
161 out_be32((void*)port
, addr
);
164 static void scc_ide_outbsync(ide_hwif_t
*hwif
, u8 addr
, unsigned long port
)
166 out_be32((void*)port
, addr
);
168 in_be32((void*)(hwif
->dma_base
+ 0x01c));
173 scc_ide_outsw(unsigned long port
, void *addr
, u32 count
)
175 u16
*ptr
= (u16
*)addr
;
177 out_be32((void*)port
, cpu_to_le16(*ptr
++));
182 scc_ide_outsl(unsigned long port
, void *addr
, u32 count
)
184 u16
*ptr
= (u16
*)addr
;
186 out_be32((void*)port
, cpu_to_le16(*ptr
++));
187 out_be32((void*)port
, cpu_to_le16(*ptr
++));
192 * scc_set_pio_mode - set host controller for PIO mode
194 * @pio: PIO mode number
196 * Load the timing settings for this device mode into the
200 static void scc_set_pio_mode(ide_drive_t
*drive
, const u8 pio
)
202 ide_hwif_t
*hwif
= HWIF(drive
);
203 struct scc_ports
*ports
= ide_get_hwifdata(hwif
);
204 unsigned long ctl_base
= ports
->ctl
;
205 unsigned long cckctrl_port
= ctl_base
+ 0xff0;
206 unsigned long piosht_port
= ctl_base
+ 0x000;
207 unsigned long pioct_port
= ctl_base
+ 0x004;
211 reg
= in_be32((void __iomem
*)cckctrl_port
);
212 if (reg
& CCKCTRL_ATACLKOEN
) {
213 offset
= 1; /* 133MHz */
215 offset
= 0; /* 100MHz */
217 reg
= JCHSTtbl
[offset
][pio
] << 16 | JCHHTtbl
[offset
][pio
];
218 out_be32((void __iomem
*)piosht_port
, reg
);
219 reg
= JCHCTtbl
[offset
][pio
];
220 out_be32((void __iomem
*)pioct_port
, reg
);
224 * scc_set_dma_mode - set host controller for DMA mode
228 * Load the timing settings for this device mode into the
232 static void scc_set_dma_mode(ide_drive_t
*drive
, const u8 speed
)
234 ide_hwif_t
*hwif
= HWIF(drive
);
235 struct scc_ports
*ports
= ide_get_hwifdata(hwif
);
236 unsigned long ctl_base
= ports
->ctl
;
237 unsigned long cckctrl_port
= ctl_base
+ 0xff0;
238 unsigned long mdmact_port
= ctl_base
+ 0x008;
239 unsigned long mcrcst_port
= ctl_base
+ 0x00c;
240 unsigned long sdmact_port
= ctl_base
+ 0x010;
241 unsigned long scrcst_port
= ctl_base
+ 0x014;
242 unsigned long udenvt_port
= ctl_base
+ 0x018;
243 unsigned long tdvhsel_port
= ctl_base
+ 0x020;
244 int is_slave
= (&hwif
->drives
[1] == drive
);
247 unsigned long jcactsel
;
249 reg
= in_be32((void __iomem
*)cckctrl_port
);
250 if (reg
& CCKCTRL_ATACLKOEN
) {
251 offset
= 1; /* 133MHz */
253 offset
= 0; /* 100MHz */
256 idx
= speed
- XFER_UDMA_0
;
258 jcactsel
= JCACTSELtbl
[offset
][idx
];
260 out_be32((void __iomem
*)sdmact_port
, JCHDCTxtbl
[offset
][idx
]);
261 out_be32((void __iomem
*)scrcst_port
, JCSTWTxtbl
[offset
][idx
]);
262 jcactsel
= jcactsel
<< 2;
263 out_be32((void __iomem
*)tdvhsel_port
, (in_be32((void __iomem
*)tdvhsel_port
) & ~TDVHSEL_SLAVE
) | jcactsel
);
265 out_be32((void __iomem
*)mdmact_port
, JCHDCTxtbl
[offset
][idx
]);
266 out_be32((void __iomem
*)mcrcst_port
, JCSTWTxtbl
[offset
][idx
]);
267 out_be32((void __iomem
*)tdvhsel_port
, (in_be32((void __iomem
*)tdvhsel_port
) & ~TDVHSEL_MASTER
) | jcactsel
);
269 reg
= JCTSStbl
[offset
][idx
] << 16 | JCENVTtbl
[offset
][idx
];
270 out_be32((void __iomem
*)udenvt_port
, reg
);
273 static void scc_dma_host_set(ide_drive_t
*drive
, int on
)
275 ide_hwif_t
*hwif
= drive
->hwif
;
276 u8 unit
= (drive
->select
.b
.unit
& 0x01);
277 u8 dma_stat
= scc_ide_inb(hwif
->dma_base
+ 4);
280 dma_stat
|= (1 << (5 + unit
));
282 dma_stat
&= ~(1 << (5 + unit
));
284 scc_ide_outb(dma_stat
, hwif
->dma_base
+ 4);
288 * scc_ide_dma_setup - begin a DMA phase
289 * @drive: target device
291 * Build an IDE DMA PRD (IDE speak for scatter gather table)
292 * and then set up the DMA transfer registers.
294 * Returns 0 on success. If a PIO fallback is required then 1
298 static int scc_dma_setup(ide_drive_t
*drive
)
300 ide_hwif_t
*hwif
= drive
->hwif
;
301 struct request
*rq
= HWGROUP(drive
)->rq
;
302 unsigned int reading
;
310 /* fall back to pio! */
311 if (!ide_build_dmatable(drive
, rq
)) {
312 ide_map_sg(drive
, rq
);
317 out_be32((void __iomem
*)(hwif
->dma_base
+ 8), hwif
->dmatable_dma
);
320 out_be32((void __iomem
*)hwif
->dma_base
, reading
);
322 /* read DMA status for INTR & ERROR flags */
323 dma_stat
= in_be32((void __iomem
*)(hwif
->dma_base
+ 4));
325 /* clear INTR & ERROR flags */
326 out_be32((void __iomem
*)(hwif
->dma_base
+ 4), dma_stat
| 6);
327 drive
->waiting_for_dma
= 1;
331 static void scc_dma_start(ide_drive_t
*drive
)
333 ide_hwif_t
*hwif
= drive
->hwif
;
334 u8 dma_cmd
= scc_ide_inb(hwif
->dma_base
);
337 scc_ide_outb(dma_cmd
| 1, hwif
->dma_base
);
342 static int __scc_dma_end(ide_drive_t
*drive
)
344 ide_hwif_t
*hwif
= drive
->hwif
;
345 u8 dma_stat
, dma_cmd
;
347 drive
->waiting_for_dma
= 0;
348 /* get DMA command mode */
349 dma_cmd
= scc_ide_inb(hwif
->dma_base
);
351 scc_ide_outb(dma_cmd
& ~1, hwif
->dma_base
);
353 dma_stat
= scc_ide_inb(hwif
->dma_base
+ 4);
354 /* clear the INTR & ERROR bits */
355 scc_ide_outb(dma_stat
| 6, hwif
->dma_base
+ 4);
356 /* purge DMA mappings */
357 ide_destroy_dmatable(drive
);
358 /* verify good DMA status */
361 return (dma_stat
& 7) != 4 ? (0x10 | dma_stat
) : 0;
365 * scc_dma_end - Stop DMA
368 * Check and clear INT Status register.
369 * Then call __scc_dma_end().
372 static int scc_dma_end(ide_drive_t
*drive
)
374 ide_hwif_t
*hwif
= HWIF(drive
);
375 void __iomem
*dma_base
= (void __iomem
*)hwif
->dma_base
;
376 unsigned long intsts_port
= hwif
->dma_base
+ 0x014;
378 int dma_stat
, data_loss
= 0;
379 static int retry
= 0;
381 /* errata A308 workaround: Step5 (check data loss) */
382 /* We don't check non ide_disk because it is limited to UDMA4 */
383 if (!(in_be32((void __iomem
*)hwif
->io_ports
.ctl_addr
)
385 drive
->media
== ide_disk
&& drive
->current_speed
> XFER_UDMA_4
) {
386 reg
= in_be32((void __iomem
*)intsts_port
);
387 if (!(reg
& INTSTS_ACTEINT
)) {
388 printk(KERN_WARNING
"%s: operation failed (transfer data loss)\n",
392 struct request
*rq
= HWGROUP(drive
)->rq
;
394 /* ERROR_RESET and drive->crc_count are needed
395 * to reduce DMA transfer mode in retry process.
398 rq
->errors
|= ERROR_RESET
;
399 for (unit
= 0; unit
< MAX_DRIVES
; unit
++) {
400 ide_drive_t
*drive
= &hwif
->drives
[unit
];
408 reg
= in_be32((void __iomem
*)intsts_port
);
410 if (reg
& INTSTS_SERROR
) {
411 printk(KERN_WARNING
"%s: SERROR\n", SCC_PATA_NAME
);
412 out_be32((void __iomem
*)intsts_port
, INTSTS_SERROR
|INTSTS_BMSINT
);
414 out_be32(dma_base
, in_be32(dma_base
) & ~QCHCD_IOS_SS
);
418 if (reg
& INTSTS_PRERR
) {
420 unsigned long ctl_base
= hwif
->config_data
;
422 maea0
= in_be32((void __iomem
*)(ctl_base
+ 0xF50));
423 maec0
= in_be32((void __iomem
*)(ctl_base
+ 0xF54));
425 printk(KERN_WARNING
"%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME
, maea0
, maec0
);
427 out_be32((void __iomem
*)intsts_port
, INTSTS_PRERR
|INTSTS_BMSINT
);
429 out_be32(dma_base
, in_be32(dma_base
) & ~QCHCD_IOS_SS
);
433 if (reg
& INTSTS_RERR
) {
434 printk(KERN_WARNING
"%s: Response Error\n", SCC_PATA_NAME
);
435 out_be32((void __iomem
*)intsts_port
, INTSTS_RERR
|INTSTS_BMSINT
);
437 out_be32(dma_base
, in_be32(dma_base
) & ~QCHCD_IOS_SS
);
441 if (reg
& INTSTS_ICERR
) {
442 out_be32(dma_base
, in_be32(dma_base
) & ~QCHCD_IOS_SS
);
444 printk(KERN_WARNING
"%s: Illegal Configuration\n", SCC_PATA_NAME
);
445 out_be32((void __iomem
*)intsts_port
, INTSTS_ICERR
|INTSTS_BMSINT
);
449 if (reg
& INTSTS_BMSINT
) {
450 printk(KERN_WARNING
"%s: Internal Bus Error\n", SCC_PATA_NAME
);
451 out_be32((void __iomem
*)intsts_port
, INTSTS_BMSINT
);
457 if (reg
& INTSTS_BMHE
) {
458 out_be32((void __iomem
*)intsts_port
, INTSTS_BMHE
);
462 if (reg
& INTSTS_ACTEINT
) {
463 out_be32((void __iomem
*)intsts_port
, INTSTS_ACTEINT
);
467 if (reg
& INTSTS_IOIRQS
) {
468 out_be32((void __iomem
*)intsts_port
, INTSTS_IOIRQS
);
474 dma_stat
= __scc_dma_end(drive
);
476 dma_stat
|= 2; /* emulate DMA error (to retry command) */
480 /* returns 1 if dma irq issued, 0 otherwise */
481 static int scc_dma_test_irq(ide_drive_t
*drive
)
483 ide_hwif_t
*hwif
= HWIF(drive
);
484 u32 int_stat
= in_be32((void __iomem
*)hwif
->dma_base
+ 0x014);
486 /* SCC errata A252,A308 workaround: Step4 */
487 if ((in_be32((void __iomem
*)hwif
->io_ports
.ctl_addr
)
489 (int_stat
& INTSTS_INTRQ
))
492 /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
493 if (int_stat
& INTSTS_IOIRQS
)
496 if (!drive
->waiting_for_dma
)
497 printk(KERN_WARNING
"%s: (%s) called while not waiting\n",
498 drive
->name
, __func__
);
502 static u8
scc_udma_filter(ide_drive_t
*drive
)
504 ide_hwif_t
*hwif
= drive
->hwif
;
505 u8 mask
= hwif
->ultra_mask
;
507 /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
508 if ((drive
->media
!= ide_disk
) && (mask
& 0xE0)) {
509 printk(KERN_INFO
"%s: limit %s to UDMA4\n",
510 SCC_PATA_NAME
, drive
->name
);
518 * setup_mmio_scc - map CTRL/BMID region
519 * @dev: PCI device we are configuring
524 static int setup_mmio_scc (struct pci_dev
*dev
, const char *name
)
526 unsigned long ctl_base
= pci_resource_start(dev
, 0);
527 unsigned long dma_base
= pci_resource_start(dev
, 1);
528 unsigned long ctl_size
= pci_resource_len(dev
, 0);
529 unsigned long dma_size
= pci_resource_len(dev
, 1);
530 void __iomem
*ctl_addr
;
531 void __iomem
*dma_addr
;
534 for (i
= 0; i
< MAX_HWIFS
; i
++) {
535 if (scc_ports
[i
].ctl
== 0)
541 ret
= pci_request_selected_regions(dev
, (1 << 2) - 1, name
);
543 printk(KERN_ERR
"%s: can't reserve resources\n", name
);
547 if ((ctl_addr
= ioremap(ctl_base
, ctl_size
)) == NULL
)
550 if ((dma_addr
= ioremap(dma_base
, dma_size
)) == NULL
)
554 scc_ports
[i
].ctl
= (unsigned long)ctl_addr
;
555 scc_ports
[i
].dma
= (unsigned long)dma_addr
;
556 pci_set_drvdata(dev
, (void *) &scc_ports
[i
]);
566 static int scc_ide_setup_pci_device(struct pci_dev
*dev
,
567 const struct ide_port_info
*d
)
569 struct scc_ports
*ports
= pci_get_drvdata(dev
);
570 ide_hwif_t
*hwif
= NULL
;
571 hw_regs_t hw
, *hws
[] = { &hw
, NULL
, NULL
, NULL
};
572 u8 idx
[4] = { 0xff, 0xff, 0xff, 0xff };
575 hwif
= ide_find_port_slot(d
);
579 memset(&hw
, 0, sizeof(hw
));
580 for (i
= 0; i
<= 8; i
++)
581 hw
.io_ports_array
[i
] = ports
->dma
+ 0x20 + i
* 4;
584 hw
.chipset
= ide_pci
;
586 idx
[0] = hwif
->index
;
588 ide_device_add(idx
, d
, hws
);
594 * init_setup_scc - set up an SCC PATA Controller
598 * Perform the initial set up for this device.
601 static int __devinit
init_setup_scc(struct pci_dev
*dev
,
602 const struct ide_port_info
*d
)
604 unsigned long ctl_base
;
605 unsigned long dma_base
;
606 unsigned long cckctrl_port
;
607 unsigned long intmask_port
;
608 unsigned long mode_port
;
609 unsigned long ecmode_port
;
610 unsigned long dma_status_port
;
612 struct scc_ports
*ports
;
615 rc
= pci_enable_device(dev
);
619 rc
= setup_mmio_scc(dev
, d
->name
);
623 ports
= pci_get_drvdata(dev
);
624 ctl_base
= ports
->ctl
;
625 dma_base
= ports
->dma
;
626 cckctrl_port
= ctl_base
+ 0xff0;
627 intmask_port
= dma_base
+ 0x010;
628 mode_port
= ctl_base
+ 0x024;
629 ecmode_port
= ctl_base
+ 0xf00;
630 dma_status_port
= dma_base
+ 0x004;
632 /* controller initialization */
634 out_be32((void*)cckctrl_port
, reg
);
635 reg
|= CCKCTRL_ATACLKOEN
;
636 out_be32((void*)cckctrl_port
, reg
);
637 reg
|= CCKCTRL_LCLKEN
| CCKCTRL_OCLKEN
;
638 out_be32((void*)cckctrl_port
, reg
);
640 out_be32((void*)cckctrl_port
, reg
);
643 reg
= in_be32((void*)cckctrl_port
);
644 if (reg
& CCKCTRL_CRST
)
649 reg
|= CCKCTRL_ATARESET
;
650 out_be32((void*)cckctrl_port
, reg
);
652 out_be32((void*)ecmode_port
, ECMODE_VALUE
);
653 out_be32((void*)mode_port
, MODE_JCUSFEN
);
654 out_be32((void*)intmask_port
, INTMASK_MSK
);
656 rc
= scc_ide_setup_pci_device(dev
, d
);
662 static void scc_tf_load(ide_drive_t
*drive
, ide_task_t
*task
)
664 struct ide_io_ports
*io_ports
= &drive
->hwif
->io_ports
;
665 struct ide_taskfile
*tf
= &task
->tf
;
666 u8 HIHI
= (task
->tf_flags
& IDE_TFLAG_LBA48
) ? 0xE0 : 0xEF;
668 if (task
->tf_flags
& IDE_TFLAG_FLAGGED
)
671 if (task
->tf_flags
& IDE_TFLAG_OUT_DATA
)
672 out_be32((void *)io_ports
->data_addr
,
673 (tf
->hob_data
<< 8) | tf
->data
);
675 if (task
->tf_flags
& IDE_TFLAG_OUT_HOB_FEATURE
)
676 scc_ide_outb(tf
->hob_feature
, io_ports
->feature_addr
);
677 if (task
->tf_flags
& IDE_TFLAG_OUT_HOB_NSECT
)
678 scc_ide_outb(tf
->hob_nsect
, io_ports
->nsect_addr
);
679 if (task
->tf_flags
& IDE_TFLAG_OUT_HOB_LBAL
)
680 scc_ide_outb(tf
->hob_lbal
, io_ports
->lbal_addr
);
681 if (task
->tf_flags
& IDE_TFLAG_OUT_HOB_LBAM
)
682 scc_ide_outb(tf
->hob_lbam
, io_ports
->lbam_addr
);
683 if (task
->tf_flags
& IDE_TFLAG_OUT_HOB_LBAH
)
684 scc_ide_outb(tf
->hob_lbah
, io_ports
->lbah_addr
);
686 if (task
->tf_flags
& IDE_TFLAG_OUT_FEATURE
)
687 scc_ide_outb(tf
->feature
, io_ports
->feature_addr
);
688 if (task
->tf_flags
& IDE_TFLAG_OUT_NSECT
)
689 scc_ide_outb(tf
->nsect
, io_ports
->nsect_addr
);
690 if (task
->tf_flags
& IDE_TFLAG_OUT_LBAL
)
691 scc_ide_outb(tf
->lbal
, io_ports
->lbal_addr
);
692 if (task
->tf_flags
& IDE_TFLAG_OUT_LBAM
)
693 scc_ide_outb(tf
->lbam
, io_ports
->lbam_addr
);
694 if (task
->tf_flags
& IDE_TFLAG_OUT_LBAH
)
695 scc_ide_outb(tf
->lbah
, io_ports
->lbah_addr
);
697 if (task
->tf_flags
& IDE_TFLAG_OUT_DEVICE
)
698 scc_ide_outb((tf
->device
& HIHI
) | drive
->select
.all
,
699 io_ports
->device_addr
);
702 static void scc_tf_read(ide_drive_t
*drive
, ide_task_t
*task
)
704 struct ide_io_ports
*io_ports
= &drive
->hwif
->io_ports
;
705 struct ide_taskfile
*tf
= &task
->tf
;
707 if (task
->tf_flags
& IDE_TFLAG_IN_DATA
) {
708 u16 data
= (u16
)in_be32((void *)io_ports
->data_addr
);
710 tf
->data
= data
& 0xff;
711 tf
->hob_data
= (data
>> 8) & 0xff;
714 /* be sure we're looking at the low order bits */
715 scc_ide_outb(ATA_DEVCTL_OBS
& ~0x80, io_ports
->ctl_addr
);
717 if (task
->tf_flags
& IDE_TFLAG_IN_NSECT
)
718 tf
->nsect
= scc_ide_inb(io_ports
->nsect_addr
);
719 if (task
->tf_flags
& IDE_TFLAG_IN_LBAL
)
720 tf
->lbal
= scc_ide_inb(io_ports
->lbal_addr
);
721 if (task
->tf_flags
& IDE_TFLAG_IN_LBAM
)
722 tf
->lbam
= scc_ide_inb(io_ports
->lbam_addr
);
723 if (task
->tf_flags
& IDE_TFLAG_IN_LBAH
)
724 tf
->lbah
= scc_ide_inb(io_ports
->lbah_addr
);
725 if (task
->tf_flags
& IDE_TFLAG_IN_DEVICE
)
726 tf
->device
= scc_ide_inb(io_ports
->device_addr
);
728 if (task
->tf_flags
& IDE_TFLAG_LBA48
) {
729 scc_ide_outb(ATA_DEVCTL_OBS
| 0x80, io_ports
->ctl_addr
);
731 if (task
->tf_flags
& IDE_TFLAG_IN_HOB_FEATURE
)
732 tf
->hob_feature
= scc_ide_inb(io_ports
->feature_addr
);
733 if (task
->tf_flags
& IDE_TFLAG_IN_HOB_NSECT
)
734 tf
->hob_nsect
= scc_ide_inb(io_ports
->nsect_addr
);
735 if (task
->tf_flags
& IDE_TFLAG_IN_HOB_LBAL
)
736 tf
->hob_lbal
= scc_ide_inb(io_ports
->lbal_addr
);
737 if (task
->tf_flags
& IDE_TFLAG_IN_HOB_LBAM
)
738 tf
->hob_lbam
= scc_ide_inb(io_ports
->lbam_addr
);
739 if (task
->tf_flags
& IDE_TFLAG_IN_HOB_LBAH
)
740 tf
->hob_lbah
= scc_ide_inb(io_ports
->lbah_addr
);
744 static void scc_input_data(ide_drive_t
*drive
, struct request
*rq
,
745 void *buf
, unsigned int len
)
747 unsigned long data_addr
= drive
->hwif
->io_ports
.data_addr
;
751 if (drive
->io_32bit
) {
752 scc_ide_insl(data_addr
, buf
, len
/ 4);
755 scc_ide_insw(data_addr
, (u8
*)buf
+ (len
& ~3), 1);
757 scc_ide_insw(data_addr
, buf
, len
/ 2);
760 static void scc_output_data(ide_drive_t
*drive
, struct request
*rq
,
761 void *buf
, unsigned int len
)
763 unsigned long data_addr
= drive
->hwif
->io_ports
.data_addr
;
767 if (drive
->io_32bit
) {
768 scc_ide_outsl(data_addr
, buf
, len
/ 4);
771 scc_ide_outsw(data_addr
, (u8
*)buf
+ (len
& ~3), 1);
773 scc_ide_outsw(data_addr
, buf
, len
/ 2);
777 * init_mmio_iops_scc - set up the iops for MMIO
778 * @hwif: interface to set up
782 static void __devinit
init_mmio_iops_scc(ide_hwif_t
*hwif
)
784 struct pci_dev
*dev
= to_pci_dev(hwif
->dev
);
785 struct scc_ports
*ports
= pci_get_drvdata(dev
);
786 unsigned long dma_base
= ports
->dma
;
788 ide_set_hwifdata(hwif
, ports
);
790 hwif
->exec_command
= scc_exec_command
;
791 hwif
->read_sff_dma_status
= scc_read_sff_dma_status
;
793 hwif
->tf_load
= scc_tf_load
;
794 hwif
->tf_read
= scc_tf_read
;
796 hwif
->input_data
= scc_input_data
;
797 hwif
->output_data
= scc_output_data
;
799 hwif
->INB
= scc_ide_inb
;
800 hwif
->OUTB
= scc_ide_outb
;
801 hwif
->OUTBSYNC
= scc_ide_outbsync
;
803 hwif
->dma_base
= dma_base
;
804 hwif
->config_data
= ports
->ctl
;
808 * init_iops_scc - set up iops
809 * @hwif: interface to set up
811 * Do the basic setup for the SCC hardware interface
812 * and then do the MMIO setup.
815 static void __devinit
init_iops_scc(ide_hwif_t
*hwif
)
817 struct pci_dev
*dev
= to_pci_dev(hwif
->dev
);
819 hwif
->hwif_data
= NULL
;
820 if (pci_get_drvdata(dev
) == NULL
)
822 init_mmio_iops_scc(hwif
);
825 static u8 __devinit
scc_cable_detect(ide_hwif_t
*hwif
)
827 return ATA_CBL_PATA80
;
831 * init_hwif_scc - set up hwif
832 * @hwif: interface to set up
834 * We do the basic set up of the interface structure. The SCC
835 * requires several custom handlers so we override the default
836 * ide DMA handlers appropriately.
839 static void __devinit
init_hwif_scc(ide_hwif_t
*hwif
)
841 struct scc_ports
*ports
= ide_get_hwifdata(hwif
);
846 out_be32((void __iomem
*)(hwif
->dma_base
+ 0x018), hwif
->dmatable_dma
);
848 if (in_be32((void __iomem
*)(hwif
->config_data
+ 0xff0)) & CCKCTRL_ATACLKOEN
)
849 hwif
->ultra_mask
= ATA_UDMA6
; /* 133MHz */
851 hwif
->ultra_mask
= ATA_UDMA5
; /* 100MHz */
854 static const struct ide_port_ops scc_port_ops
= {
855 .set_pio_mode
= scc_set_pio_mode
,
856 .set_dma_mode
= scc_set_dma_mode
,
857 .udma_filter
= scc_udma_filter
,
858 .cable_detect
= scc_cable_detect
,
861 static const struct ide_dma_ops scc_dma_ops
= {
862 .dma_host_set
= scc_dma_host_set
,
863 .dma_setup
= scc_dma_setup
,
864 .dma_exec_cmd
= ide_dma_exec_cmd
,
865 .dma_start
= scc_dma_start
,
866 .dma_end
= scc_dma_end
,
867 .dma_test_irq
= scc_dma_test_irq
,
868 .dma_lost_irq
= ide_dma_lost_irq
,
869 .dma_timeout
= ide_dma_timeout
,
/* Build an ide_port_info initializer for one SCC port. */
#define DECLARE_SCC_DEV(name_str)			\
	{						\
		.name		= name_str,		\
		.init_iops	= init_iops_scc,	\
		.init_hwif	= init_hwif_scc,	\
		.port_ops	= &scc_port_ops,	\
		.dma_ops	= &scc_dma_ops,		\
		.host_flags	= IDE_HFLAG_SINGLE,	\
		.pio_mask	= ATA_PIO4,		\
	}
883 static const struct ide_port_info scc_chipsets
[] __devinitdata
= {
884 /* 0 */ DECLARE_SCC_DEV("sccIDE"),
888 * scc_init_one - pci layer discovery entry
890 * @id: ident table entry
892 * Called by the PCI code when it finds an SCC PATA controller.
893 * We then use the IDE PCI generic helper to do most of the work.
896 static int __devinit
scc_init_one(struct pci_dev
*dev
, const struct pci_device_id
*id
)
898 return init_setup_scc(dev
, &scc_chipsets
[id
->driver_data
]);
902 * scc_remove - pci layer remove entry
905 * Called by the PCI code when it removes an SCC PATA controller.
908 static void __devexit
scc_remove(struct pci_dev
*dev
)
910 struct scc_ports
*ports
= pci_get_drvdata(dev
);
911 ide_hwif_t
*hwif
= ports
->hwif
;
913 if (hwif
->dmatable_cpu
) {
914 pci_free_consistent(dev
, PRD_ENTRIES
* PRD_BYTES
,
915 hwif
->dmatable_cpu
, hwif
->dmatable_dma
);
916 hwif
->dmatable_cpu
= NULL
;
919 ide_unregister(hwif
);
921 iounmap((void*)ports
->dma
);
922 iounmap((void*)ports
->ctl
);
923 pci_release_selected_regions(dev
, (1 << 2) - 1);
924 memset(ports
, 0, sizeof(*ports
));
927 static const struct pci_device_id scc_pci_tbl
[] = {
928 { PCI_VDEVICE(TOSHIBA_2
, PCI_DEVICE_ID_TOSHIBA_SCC_ATA
), 0 },
931 MODULE_DEVICE_TABLE(pci
, scc_pci_tbl
);
933 static struct pci_driver driver
= {
935 .id_table
= scc_pci_tbl
,
936 .probe
= scc_init_one
,
937 .remove
= scc_remove
,
940 static int scc_ide_init(void)
942 return ide_pci_register_driver(&driver
);
945 module_init(scc_ide_init
);
947 static void scc_ide_exit(void)
949 ide_pci_unregister_driver(&driver);
951 module_exit(scc_ide_exit);
955 MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
956 MODULE_LICENSE("GPL");