ide: add ->exec_command method
deliverable/linux.git: drivers/ide/pci/scc_pata.c
/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat <alan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/init.h>

#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4

#define SCC_PATA_NAME "scc IDE"

#define TDVHSEL_MASTER 0x00000001
#define TDVHSEL_SLAVE 0x00000004

#define MODE_JCUSFEN 0x00000080

#define CCKCTRL_ATARESET 0x00040000
#define CCKCTRL_BUFCNT 0x00020000
#define CCKCTRL_CRST 0x00010000
#define CCKCTRL_OCLKEN 0x00000100
#define CCKCTRL_ATACLKOEN 0x00000002
#define CCKCTRL_LCLKEN 0x00000001

#define QCHCD_IOS_SS 0x00000001

#define QCHSD_STPDIAG 0x00020000

#define INTMASK_MSK 0xD1000012
#define INTSTS_SERROR 0x80000000
#define INTSTS_PRERR 0x40000000
#define INTSTS_RERR 0x10000000
#define INTSTS_ICERR 0x01000000
#define INTSTS_BMSINT 0x00000010
#define INTSTS_BMHE 0x00000008
#define INTSTS_IOIRQS 0x00000004
#define INTSTS_INTRQ 0x00000002
#define INTSTS_ACTEINT 0x00000001

#define ECMODE_VALUE 0x01

static struct scc_ports {
        unsigned long ctl, dma;
        ide_hwif_t *hwif;  /* for removing port from system */
} scc_ports[MAX_HWIFS];

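/*
 * All timing tables below are indexed as [clock][mode]: the first index
 * selects the ATA bus clock (0: 100MHz, 1: 133MHz, chosen at runtime from
 * the CCKCTRL_ATACLKOEN bit), the second the PIO or UDMA mode number.
 */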
/* PIO transfer mode table */
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
        {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
        {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}  /* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
        {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
        {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}  /* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
        {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
        {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}  /* 133MHz */
};


/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
        {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
        {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}  /* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static unsigned long JCSTWTxtbl[2][7] = {
        {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
        {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}  /* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
        {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
        {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}  /* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
        {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}  /* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
        {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}  /* 133MHz */
};


static u8 scc_ide_inb(unsigned long port)
{
        u32 data = in_be32((void*)port);
        return (u8)data;
}

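/*
 * Register access is 32-bit MMIO. The dummy read of dma_base + 0x01c
 * after writing the command appears to be there to flush the posted
 * write before the drive starts acting on it.
 */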
static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
        out_be32((void *)hwif->io_ports.command_addr, cmd);
        eieio();
        in_be32((void *)(hwif->dma_base + 0x01c));
        eieio();
}

static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
{
        return (u8)in_be32((void *)(hwif->dma_base + 4));
}

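/*
 * PIO data transfers also go through 32-bit MMIO: each in_be32()/out_be32()
 * carries one 16-bit data word, which the driver treats as little-endian,
 * hence the le16_to_cpu()/cpu_to_le16() conversions below.
 */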
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                *ptr++ = le16_to_cpu(in_be32((void*)port));
        }
}

static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                *ptr++ = le16_to_cpu(in_be32((void*)port));
                *ptr++ = le16_to_cpu(in_be32((void*)port));
        }
}

static void scc_ide_outb(u8 addr, unsigned long port)
{
        out_be32((void*)port, addr);
}

static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
{
        out_be32((void*)port, addr);
        eieio();
        in_be32((void*)(hwif->dma_base + 0x01c));
        eieio();
}

static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                out_be32((void*)port, cpu_to_le16(*ptr++));
        }
}

static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                out_be32((void*)port, cpu_to_le16(*ptr++));
                out_be32((void*)port, cpu_to_le16(*ptr++));
        }
}

/**
 * scc_set_pio_mode - set host controller for PIO mode
 * @drive: drive
 * @pio: PIO mode number
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct scc_ports *ports = ide_get_hwifdata(hwif);
        unsigned long ctl_base = ports->ctl;
        unsigned long cckctrl_port = ctl_base + 0xff0;
        unsigned long piosht_port = ctl_base + 0x000;
        unsigned long pioct_port = ctl_base + 0x004;
        unsigned long reg;
        int offset;

        reg = in_be32((void __iomem *)cckctrl_port);
        if (reg & CCKCTRL_ATACLKOEN) {
                offset = 1; /* 133MHz */
        } else {
                offset = 0; /* 100MHz */
        }
        reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
        out_be32((void __iomem *)piosht_port, reg);
        reg = JCHCTtbl[offset][pio];
        out_be32((void __iomem *)pioct_port, reg);
}

/**
 * scc_set_dma_mode - set host controller for DMA mode
 * @drive: drive
 * @speed: DMA mode
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct scc_ports *ports = ide_get_hwifdata(hwif);
        unsigned long ctl_base = ports->ctl;
        unsigned long cckctrl_port = ctl_base + 0xff0;
        unsigned long mdmact_port = ctl_base + 0x008;
        unsigned long mcrcst_port = ctl_base + 0x00c;
        unsigned long sdmact_port = ctl_base + 0x010;
        unsigned long scrcst_port = ctl_base + 0x014;
        unsigned long udenvt_port = ctl_base + 0x018;
        unsigned long tdvhsel_port = ctl_base + 0x020;
        int is_slave = (&hwif->drives[1] == drive);
        int offset, idx;
        unsigned long reg;
        unsigned long jcactsel;

        reg = in_be32((void __iomem *)cckctrl_port);
        if (reg & CCKCTRL_ATACLKOEN) {
                offset = 1; /* 133MHz */
        } else {
                offset = 0; /* 100MHz */
        }

        idx = speed - XFER_UDMA_0;

        jcactsel = JCACTSELtbl[offset][idx];
        if (is_slave) {
                out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
                out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
                jcactsel = jcactsel << 2;
                out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
        } else {
                out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
                out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
                out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
        }
        reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
        out_be32((void __iomem *)udenvt_port, reg);
}

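/*
 * Standard SFF-8038i bus master status register layout: bit 5 (drive 0)
 * and bit 6 (drive 1) report DMA capability, which is what gets toggled
 * here when DMA is switched on or off for a device.
 */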
static void scc_dma_host_set(ide_drive_t *drive, int on)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 unit = (drive->select.b.unit & 0x01);
        u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);

        if (on)
                dma_stat |= (1 << (5 + unit));
        else
                dma_stat &= ~(1 << (5 + unit));

        scc_ide_outb(dma_stat, hwif->dma_base + 4);
}

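/*
 * The bus master registers follow the usual SFF-8038i layout relative to
 * dma_base: +0 command, +4 status, +8 PRD table pointer. Writing 1 to the
 * INTR/ERROR bits in the status register clears them.
 */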
/**
 * scc_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

static int scc_dma_setup(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
        unsigned int reading;
        u8 dma_stat;

        if (rq_data_dir(rq))
                reading = 0;
        else
                reading = 1 << 3;

        /* fall back to pio! */
        if (!ide_build_dmatable(drive, rq)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        /* PRD table */
        out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

        /* specify r/w */
        out_be32((void __iomem *)hwif->dma_base, reading);

        /* read DMA status for INTR & ERROR flags */
        dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));

        /* clear INTR & ERROR flags */
        out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
        drive->waiting_for_dma = 1;
        return 0;
}

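/* Bit 0 of the bus master command register starts the DMA engine. */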
static void scc_dma_start(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_cmd = scc_ide_inb(hwif->dma_base);

        /* start DMA */
        scc_ide_outb(dma_cmd | 1, hwif->dma_base);
        hwif->dma = 1;
        wmb();
}

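/*
 * In the status register, bit 0 is "DMA active", bit 1 "error" and bit 2
 * "interrupt". A clean completion therefore leaves only bit 2 set, which
 * is what the (dma_stat & 7) != 4 check below verifies.
 */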
static int __scc_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_stat, dma_cmd;

        drive->waiting_for_dma = 0;
        /* get DMA command mode */
        dma_cmd = scc_ide_inb(hwif->dma_base);
        /* stop DMA */
        scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
        /* get DMA status */
        dma_stat = scc_ide_inb(hwif->dma_base + 4);
        /* clear the INTR & ERROR bits */
        scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
        /* purge DMA mappings */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
        hwif->dma = 0;
        wmb();
        return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

/**
 * scc_dma_end - Stop DMA
 * @drive: IDE drive
 *
 * Check and clear INT Status register.
 * Then call __scc_dma_end().
 */

static int scc_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        void __iomem *dma_base = (void __iomem *)hwif->dma_base;
        unsigned long intsts_port = hwif->dma_base + 0x014;
        u32 reg;
        int dma_stat, data_loss = 0;
        static int retry = 0;

        /* errata A308 workaround: Step5 (check data loss) */
        /* We don't check non-ide_disk devices because they are limited to UDMA4 */
        if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
              & ERR_STAT) &&
            drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
                reg = in_be32((void __iomem *)intsts_port);
                if (!(reg & INTSTS_ACTEINT)) {
                        printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
                               drive->name);
                        data_loss = 1;
                        if (retry++) {
                                struct request *rq = HWGROUP(drive)->rq;
                                int unit;
                                /* ERROR_RESET and drive->crc_count are needed
                                 * to reduce DMA transfer mode in retry process.
                                 */
                                if (rq)
                                        rq->errors |= ERROR_RESET;
                                for (unit = 0; unit < MAX_DRIVES; unit++) {
                                        ide_drive_t *drive = &hwif->drives[unit];
                                        drive->crc_count++;
                                }
                        }
                }
        }

        while (1) {
                reg = in_be32((void __iomem *)intsts_port);

                if (reg & INTSTS_SERROR) {
                        printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

                        out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
                        continue;
                }

                if (reg & INTSTS_PRERR) {
                        u32 maea0, maec0;
                        unsigned long ctl_base = hwif->config_data;

                        maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
                        maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

                        printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

                        out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

                        out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
                        continue;
                }

                if (reg & INTSTS_RERR) {
                        printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

                        out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
                        continue;
                }

                if (reg & INTSTS_ICERR) {
                        out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

                        printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
                        continue;
                }

                if (reg & INTSTS_BMSINT) {
                        printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

                        ide_do_reset(drive);
                        continue;
                }

                if (reg & INTSTS_BMHE) {
                        out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
                        continue;
                }

                if (reg & INTSTS_ACTEINT) {
                        out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
                        continue;
                }

                if (reg & INTSTS_IOIRQS) {
                        out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
                        continue;
                }
                break;
        }

        dma_stat = __scc_dma_end(drive);
        if (data_loss)
                dma_stat |= 2; /* emulate DMA error (to retry command) */
        return dma_stat;
}

/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

        /* SCC errata A252,A308 workaround: Step4 */
        if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
             & ERR_STAT) &&
            (int_stat & INTSTS_INTRQ))
                return 1;

        /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
        if (int_stat & INTSTS_IOIRQS)
                return 1;

        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
                       drive->name, __func__);
        return 0;
}

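/*
 * ultra_mask is a bitmask of supported UDMA modes (bit n = UDMA n), so
 * "mask & 0xE0" tests whether UDMA5-7 would otherwise be offered.
 */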
static u8 scc_udma_filter(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 mask = hwif->ultra_mask;

        /* errata A308 workaround: limit non-ide_disk drives to UDMA4 */
        if ((drive->media != ide_disk) && (mask & 0xE0)) {
                printk(KERN_INFO "%s: limit %s to UDMA4\n",
                       SCC_PATA_NAME, drive->name);
                mask = ATA_UDMA4;
        }

        return mask;
}

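/*
 * BAR 0 maps the SCC control registers and BAR 1 the bus master (BMID)
 * block; the (1 << 2) - 1 mask passed to the PCI region helpers selects
 * exactly those two BARs.
 */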
/**
 * setup_mmio_scc - map CTRL/BMID region
 * @dev: PCI device we are configuring
 * @name: device name
 *
 */

static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
        unsigned long ctl_base = pci_resource_start(dev, 0);
        unsigned long dma_base = pci_resource_start(dev, 1);
        unsigned long ctl_size = pci_resource_len(dev, 0);
        unsigned long dma_size = pci_resource_len(dev, 1);
        void __iomem *ctl_addr;
        void __iomem *dma_addr;
        int i, ret;

        for (i = 0; i < MAX_HWIFS; i++) {
                if (scc_ports[i].ctl == 0)
                        break;
        }
        if (i >= MAX_HWIFS)
                return -ENOMEM;

        ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
        if (ret < 0) {
                printk(KERN_ERR "%s: can't reserve resources\n", name);
                return ret;
        }

        if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
                goto fail_0;

        if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
                goto fail_1;

        pci_set_master(dev);
        scc_ports[i].ctl = (unsigned long)ctl_addr;
        scc_ports[i].dma = (unsigned long)dma_addr;
        pci_set_drvdata(dev, (void *) &scc_ports[i]);

        return 1;

fail_1:
        iounmap(ctl_addr);
fail_0:
        return -ENOMEM;
}

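/*
 * The ATA task file registers live in the BMID region, starting at
 * dma_base + 0x20 and spaced four bytes apart, which is what the
 * io_ports_array loop below fills in.
 */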
static int scc_ide_setup_pci_device(struct pci_dev *dev,
                                    const struct ide_port_info *d)
{
        struct scc_ports *ports = pci_get_drvdata(dev);
        ide_hwif_t *hwif = NULL;
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        int i;

        hwif = ide_find_port_slot(d);
        if (hwif == NULL)
                return -ENOMEM;

        memset(&hw, 0, sizeof(hw));
        for (i = 0; i <= 8; i++)
                hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
        hw.irq = dev->irq;
        hw.dev = &dev->dev;
        hw.chipset = ide_pci;

        idx[0] = hwif->index;

        ide_device_add(idx, d, hws);

        return 0;
}

/**
 * init_setup_scc - set up an SCC PATA Controller
 * @dev: PCI device
 * @d: IDE port info
 *
 * Perform the initial set up for this device.
 */

static int __devinit init_setup_scc(struct pci_dev *dev,
                                    const struct ide_port_info *d)
{
        unsigned long ctl_base;
        unsigned long dma_base;
        unsigned long cckctrl_port;
        unsigned long intmask_port;
        unsigned long mode_port;
        unsigned long ecmode_port;
        unsigned long dma_status_port;
        u32 reg = 0;
        struct scc_ports *ports;
        int rc;

        rc = pci_enable_device(dev);
        if (rc)
                goto end;

        rc = setup_mmio_scc(dev, d->name);
        if (rc < 0)
                goto end;

        ports = pci_get_drvdata(dev);
        ctl_base = ports->ctl;
        dma_base = ports->dma;
        cckctrl_port = ctl_base + 0xff0;
        intmask_port = dma_base + 0x010;
        mode_port = ctl_base + 0x024;
        ecmode_port = ctl_base + 0xf00;
        dma_status_port = dma_base + 0x004;

        /* controller initialization */
        reg = 0;
        out_be32((void*)cckctrl_port, reg);
        reg |= CCKCTRL_ATACLKOEN;
        out_be32((void*)cckctrl_port, reg);
        reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
        out_be32((void*)cckctrl_port, reg);
        reg |= CCKCTRL_CRST;
        out_be32((void*)cckctrl_port, reg);

        for (;;) {
                reg = in_be32((void*)cckctrl_port);
                if (reg & CCKCTRL_CRST)
                        break;
                udelay(5000);
        }

        reg |= CCKCTRL_ATARESET;
        out_be32((void*)cckctrl_port, reg);

        out_be32((void*)ecmode_port, ECMODE_VALUE);
        out_be32((void*)mode_port, MODE_JCUSFEN);
        out_be32((void*)intmask_port, INTMASK_MSK);

        rc = scc_ide_setup_pci_device(dev, d);

end:
        return rc;
}

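/*
 * HIHI masks what is taken from tf->device: 0xE0 for LBA48 (the low
 * nibble is unused there), 0xEF otherwise (bit 4 comes from drive->select
 * instead), and 0xFF for FLAGGED taskfiles where the caller supplies
 * every bit.
 */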
static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
        struct ide_io_ports *io_ports = &drive->hwif->io_ports;
        struct ide_taskfile *tf = &task->tf;
        u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

        if (task->tf_flags & IDE_TFLAG_FLAGGED)
                HIHI = 0xFF;

        if (task->tf_flags & IDE_TFLAG_OUT_DATA)
                out_be32((void *)io_ports->data_addr,
                         (tf->hob_data << 8) | tf->data);

        if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
                scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
                scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
                scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
                scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
                scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

        if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
                scc_ide_outb(tf->feature, io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
                scc_ide_outb(tf->nsect, io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
                scc_ide_outb(tf->lbal, io_ports->lbal_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
                scc_ide_outb(tf->lbam, io_ports->lbam_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
                scc_ide_outb(tf->lbah, io_ports->lbah_addr);

        if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
                scc_ide_outb((tf->device & HIHI) | drive->select.all,
                             io_ports->device_addr);
}

static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
        struct ide_io_ports *io_ports = &drive->hwif->io_ports;
        struct ide_taskfile *tf = &task->tf;

        if (task->tf_flags & IDE_TFLAG_IN_DATA) {
                u16 data = (u16)in_be32((void *)io_ports->data_addr);

                tf->data = data & 0xff;
                tf->hob_data = (data >> 8) & 0xff;
        }

        /* be sure we're looking at the low order bits */
        scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

        if (task->tf_flags & IDE_TFLAG_IN_NSECT)
                tf->nsect = scc_ide_inb(io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAL)
                tf->lbal = scc_ide_inb(io_ports->lbal_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAM)
                tf->lbam = scc_ide_inb(io_ports->lbam_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAH)
                tf->lbah = scc_ide_inb(io_ports->lbah_addr);
        if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
                tf->device = scc_ide_inb(io_ports->device_addr);

        if (task->tf_flags & IDE_TFLAG_LBA48) {
                scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

                if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
                        tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
                        tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
                        tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
                        tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
                        tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
        }
}

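/*
 * len is incremented first so that an odd byte count is rounded up to a
 * whole 16-bit word before the word/dword transfer helpers are used.
 */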
static void scc_input_data(ide_drive_t *drive, struct request *rq,
                           void *buf, unsigned int len)
{
        unsigned long data_addr = drive->hwif->io_ports.data_addr;

        len++;

        if (drive->io_32bit) {
                scc_ide_insl(data_addr, buf, len / 4);

                if ((len & 3) >= 2)
                        scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
        } else
                scc_ide_insw(data_addr, buf, len / 2);
}

static void scc_output_data(ide_drive_t *drive, struct request *rq,
                            void *buf, unsigned int len)
{
        unsigned long data_addr = drive->hwif->io_ports.data_addr;

        len++;

        if (drive->io_32bit) {
                scc_ide_outsl(data_addr, buf, len / 4);

                if ((len & 3) >= 2)
                        scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
        } else
                scc_ide_outsw(data_addr, buf, len / 2);
}

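/*
 * Hook the MMIO accessors above into the hwif, including the
 * ->exec_command method that the patch subject refers to.
 */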
/**
 * init_mmio_iops_scc - set up the iops for MMIO
 * @hwif: interface to set up
 *
 */

static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        struct scc_ports *ports = pci_get_drvdata(dev);
        unsigned long dma_base = ports->dma;

        ide_set_hwifdata(hwif, ports);

        hwif->exec_command = scc_exec_command;
        hwif->read_sff_dma_status = scc_read_sff_dma_status;

        hwif->tf_load = scc_tf_load;
        hwif->tf_read = scc_tf_read;

        hwif->input_data = scc_input_data;
        hwif->output_data = scc_output_data;

        hwif->INB = scc_ide_inb;
        hwif->OUTB = scc_ide_outb;
        hwif->OUTBSYNC = scc_ide_outbsync;

        hwif->dma_base = dma_base;
        hwif->config_data = ports->ctl;
}

/**
 * init_iops_scc - set up iops
 * @hwif: interface to set up
 *
 * Do the basic setup for the SCC hardware interface
 * and then do the MMIO setup.
 */

static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);

        hwif->hwif_data = NULL;
        if (pci_get_drvdata(dev) == NULL)
                return;
        init_mmio_iops_scc(hwif);
}

static u8 __devinit scc_cable_detect(ide_hwif_t *hwif)
{
        return ATA_CBL_PATA80;
}

/**
 * init_hwif_scc - set up hwif
 * @hwif: interface to set up
 *
 * We do the basic set up of the interface structure. The SCC
 * requires several custom handlers so we override the default
 * ide DMA handlers appropriately.
 */

static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
        struct scc_ports *ports = ide_get_hwifdata(hwif);

        ports->hwif = hwif;

        /* PTERADD */
        out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

        if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
                hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
        else
                hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}

static const struct ide_port_ops scc_port_ops = {
        .set_pio_mode = scc_set_pio_mode,
        .set_dma_mode = scc_set_dma_mode,
        .udma_filter = scc_udma_filter,
        .cable_detect = scc_cable_detect,
};

static const struct ide_dma_ops scc_dma_ops = {
        .dma_host_set = scc_dma_host_set,
        .dma_setup = scc_dma_setup,
        .dma_exec_cmd = ide_dma_exec_cmd,
        .dma_start = scc_dma_start,
        .dma_end = scc_dma_end,
        .dma_test_irq = scc_dma_test_irq,
        .dma_lost_irq = ide_dma_lost_irq,
        .dma_timeout = ide_dma_timeout,
};

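/*
 * IDE_HFLAG_SINGLE marks this as a single-channel controller and ATA_PIO4
 * advertises PIO modes 0-4; UDMA support is filled in at runtime by
 * init_hwif_scc() above.
 */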
#define DECLARE_SCC_DEV(name_str)                       \
  {                                                     \
      .name = name_str,                                 \
      .init_iops = init_iops_scc,                       \
      .init_hwif = init_hwif_scc,                       \
      .port_ops = &scc_port_ops,                        \
      .dma_ops = &scc_dma_ops,                          \
      .host_flags = IDE_HFLAG_SINGLE,                   \
      .pio_mask = ATA_PIO4,                             \
  }

static const struct ide_port_info scc_chipsets[] __devinitdata = {
        /* 0 */ DECLARE_SCC_DEV("sccIDE"),
};

/**
 * scc_init_one - pci layer discovery entry
 * @dev: PCI device
 * @id: ident table entry
 *
 * Called by the PCI code when it finds an SCC PATA controller.
 * We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
        return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}

/**
 * scc_remove - pci layer remove entry
 * @dev: PCI device
 *
 * Called by the PCI code when it removes an SCC PATA controller.
 */

static void __devexit scc_remove(struct pci_dev *dev)
{
        struct scc_ports *ports = pci_get_drvdata(dev);
        ide_hwif_t *hwif = ports->hwif;

        if (hwif->dmatable_cpu) {
                pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
                                    hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }

        ide_unregister(hwif);

        iounmap((void*)ports->dma);
        iounmap((void*)ports->ctl);
        pci_release_selected_regions(dev, (1 << 2) - 1);
        memset(ports, 0, sizeof(*ports));
}

static const struct pci_device_id scc_pci_tbl[] = {
        { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver driver = {
        .name = "SCC IDE",
        .id_table = scc_pci_tbl,
        .probe = scc_init_one,
        .remove = scc_remove,
};

static int scc_ide_init(void)
{
        return ide_pci_register_driver(&driver);
}

module_init(scc_ide_init);
/* -- No exit code?
static void scc_ide_exit(void)
{
        ide_pci_unregister_driver(&driver);
}
module_exit(scc_ide_exit);
 */


MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");