ide: set hwif->dev in ide_init_port_hw() (take 2)
drivers/ide/pci/scc_pata.c
/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat <alan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/init.h>

#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA  0x01b4

#define SCC_PATA_NAME        "scc IDE"

#define TDVHSEL_MASTER       0x00000001
#define TDVHSEL_SLAVE        0x00000004

#define MODE_JCUSFEN         0x00000080

#define CCKCTRL_ATARESET     0x00040000
#define CCKCTRL_BUFCNT       0x00020000
#define CCKCTRL_CRST         0x00010000
#define CCKCTRL_OCLKEN       0x00000100
#define CCKCTRL_ATACLKOEN    0x00000002
#define CCKCTRL_LCLKEN       0x00000001

#define QCHCD_IOS_SS         0x00000001

#define QCHSD_STPDIAG        0x00020000

#define INTMASK_MSK          0xD1000012
#define INTSTS_SERROR        0x80000000
#define INTSTS_PRERR         0x40000000
#define INTSTS_RERR          0x10000000
#define INTSTS_ICERR         0x01000000
#define INTSTS_BMSINT        0x00000010
#define INTSTS_BMHE          0x00000008
#define INTSTS_IOIRQS        0x00000004
#define INTSTS_INTRQ         0x00000002
#define INTSTS_ACTEINT       0x00000001

#define ECMODE_VALUE 0x01

static struct scc_ports {
        unsigned long ctl, dma;
        ide_hwif_t *hwif;  /* for removing port from system */
} scc_ports[MAX_HWIFS];

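/*
 * All timing tables below are indexed as [clock][mode]: row 0 holds the
 * 100MHz settings and row 1 the 133MHz settings (selected at runtime by
 * testing CCKCTRL_ATACLKOEN), while the column is the PIO or UDMA mode
 * number.
 */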
/* PIO transfer mode table */
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
        {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},  /* 100MHz */
        {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}   /* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
        {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},  /* 100MHz */
        {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}   /* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
        {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},  /* 100MHz */
        {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}   /* 133MHz */
};


/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
        {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},  /* 100MHz */
        {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}   /* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static unsigned long JCSTWTxtbl[2][7] = {
        {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},  /* 100MHz */
        {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}   /* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
        {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},  /* 100MHz */
        {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}   /* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},  /* 100MHz */
        {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}   /* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
        {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},  /* 100MHz */
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}   /* 133MHz */
};

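/*
 * Register access helpers: the SCC exposes the ATA task file and data
 * register as 32-bit big-endian MMIO words, so every accessor below goes
 * through in_be32()/out_be32() and transfers at most 16 bits of ATA data
 * per access.
 */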
static u8 scc_ide_inb(unsigned long port)
{
        u32 data = in_be32((void*)port);
        return (u8)data;
}

static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                *ptr++ = le16_to_cpu(in_be32((void*)port));
        }
}

static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                *ptr++ = le16_to_cpu(in_be32((void*)port));
                *ptr++ = le16_to_cpu(in_be32((void*)port));
        }
}

static void scc_ide_outb(u8 addr, unsigned long port)
{
        out_be32((void*)port, addr);
}

static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
{
        out_be32((void*)port, addr);
        eieio();
        in_be32((void*)(hwif->dma_base + 0x01c));
        eieio();
}

static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                out_be32((void*)port, cpu_to_le16(*ptr++));
        }
}

static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
        u16 *ptr = (u16 *)addr;
        while (count--) {
                out_be32((void*)port, cpu_to_le16(*ptr++));
                out_be32((void*)port, cpu_to_le16(*ptr++));
        }
}

/**
 * scc_set_pio_mode - set host controller for PIO mode
 * @drive: drive
 * @pio: PIO mode number
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct scc_ports *ports = ide_get_hwifdata(hwif);
        unsigned long ctl_base = ports->ctl;
        unsigned long cckctrl_port = ctl_base + 0xff0;
        unsigned long piosht_port = ctl_base + 0x000;
        unsigned long pioct_port = ctl_base + 0x004;
        unsigned long reg;
        int offset;

        reg = in_be32((void __iomem *)cckctrl_port);
        if (reg & CCKCTRL_ATACLKOEN) {
                offset = 1;  /* 133MHz */
        } else {
                offset = 0;  /* 100MHz */
        }
        reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
        out_be32((void __iomem *)piosht_port, reg);
        reg = JCHCTtbl[offset][pio];
        out_be32((void __iomem *)pioct_port, reg);
}

/**
 * scc_set_dma_mode - set host controller for DMA mode
 * @drive: drive
 * @speed: DMA mode
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct scc_ports *ports = ide_get_hwifdata(hwif);
        unsigned long ctl_base = ports->ctl;
        unsigned long cckctrl_port = ctl_base + 0xff0;
        unsigned long mdmact_port = ctl_base + 0x008;
        unsigned long mcrcst_port = ctl_base + 0x00c;
        unsigned long sdmact_port = ctl_base + 0x010;
        unsigned long scrcst_port = ctl_base + 0x014;
        unsigned long udenvt_port = ctl_base + 0x018;
        unsigned long tdvhsel_port = ctl_base + 0x020;
        int is_slave = (&hwif->drives[1] == drive);
        int offset, idx;
        unsigned long reg;
        unsigned long jcactsel;

        reg = in_be32((void __iomem *)cckctrl_port);
        if (reg & CCKCTRL_ATACLKOEN) {
                offset = 1;  /* 133MHz */
        } else {
                offset = 0;  /* 100MHz */
        }

        idx = speed - XFER_UDMA_0;

        jcactsel = JCACTSELtbl[offset][idx];
        if (is_slave) {
                out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
                out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
                jcactsel = jcactsel << 2;
                out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
        } else {
                out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
                out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
                out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
        }
        reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
        out_be32((void __iomem *)udenvt_port, reg);
}

static void scc_dma_host_set(ide_drive_t *drive, int on)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 unit = (drive->select.b.unit & 0x01);
        u8 dma_stat = scc_ide_inb(hwif->dma_status);

        if (on)
                dma_stat |= (1 << (5 + unit));
        else
                dma_stat &= ~(1 << (5 + unit));

        scc_ide_outb(dma_stat, hwif->dma_status);
}

/**
 * scc_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

static int scc_dma_setup(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
        unsigned int reading;
        u8 dma_stat;

        if (rq_data_dir(rq))
                reading = 0;
        else
                reading = 1 << 3;

        /* fall back to pio! */
        if (!ide_build_dmatable(drive, rq)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        /* PRD table */
        out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

        /* specify r/w */
        out_be32((void __iomem *)hwif->dma_command, reading);

        /* read dma_status for INTR & ERROR flags */
        dma_stat = in_be32((void __iomem *)hwif->dma_status);

        /* clear INTR & ERROR flags */
        out_be32((void __iomem *)hwif->dma_status, dma_stat|6);
        drive->waiting_for_dma = 1;
        return 0;
}

static void scc_dma_start(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_cmd = scc_ide_inb(hwif->dma_command);

        /* start DMA */
        scc_ide_outb(dma_cmd | 1, hwif->dma_command);
        hwif->dma = 1;
        wmb();
}

static int __scc_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_stat, dma_cmd;

        drive->waiting_for_dma = 0;
        /* get DMA command mode */
        dma_cmd = scc_ide_inb(hwif->dma_command);
        /* stop DMA */
        scc_ide_outb(dma_cmd & ~1, hwif->dma_command);
        /* get DMA status */
        dma_stat = scc_ide_inb(hwif->dma_status);
        /* clear the INTR & ERROR bits */
        scc_ide_outb(dma_stat | 6, hwif->dma_status);
        /* purge DMA mappings */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
        hwif->dma = 0;
        wmb();
        return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

/**
 * scc_dma_end - Stop DMA
 * @drive: IDE drive
 *
 * Check and clear INT Status register.
 * Then call __scc_dma_end().
 */

static int scc_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long intsts_port = hwif->dma_base + 0x014;
        u32 reg;
        int dma_stat, data_loss = 0;
        static int retry = 0;

        /* errata A308 workaround: Step5 (check data loss) */
        /* We don't check non ide_disk because it is limited to UDMA4 */
        if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
              & ERR_STAT) &&
            drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
                reg = in_be32((void __iomem *)intsts_port);
                if (!(reg & INTSTS_ACTEINT)) {
                        printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
                               drive->name);
                        data_loss = 1;
                        if (retry++) {
                                struct request *rq = HWGROUP(drive)->rq;
                                int unit;
                                /* ERROR_RESET and drive->crc_count are needed
                                 * to reduce DMA transfer mode in retry process.
                                 */
                                if (rq)
                                        rq->errors |= ERROR_RESET;
                                for (unit = 0; unit < MAX_DRIVES; unit++) {
                                        ide_drive_t *drive = &hwif->drives[unit];
                                        drive->crc_count++;
                                }
                        }
                }
        }

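        /*
         * Drain the SCC interrupt status register: acknowledge and handle
         * every pending error/status bit (issuing a reset on internal bus
         * errors) until nothing is left, before the DMA engine is finally
         * stopped by __scc_dma_end() below.
         */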
        while (1) {
                reg = in_be32((void __iomem *)intsts_port);

                if (reg & INTSTS_SERROR) {
                        printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

                        out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
                        continue;
                }

                if (reg & INTSTS_PRERR) {
                        u32 maea0, maec0;
                        unsigned long ctl_base = hwif->config_data;

                        maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
                        maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

                        printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

                        out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

                        out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
                        continue;
                }

                if (reg & INTSTS_RERR) {
                        printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

                        out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
                        continue;
                }

                if (reg & INTSTS_ICERR) {
                        out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);

                        printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
                        continue;
                }

                if (reg & INTSTS_BMSINT) {
                        printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

                        ide_do_reset(drive);
                        continue;
                }

                if (reg & INTSTS_BMHE) {
                        out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
                        continue;
                }

                if (reg & INTSTS_ACTEINT) {
                        out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
                        continue;
                }

                if (reg & INTSTS_IOIRQS) {
                        out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
                        continue;
                }
                break;
        }

        dma_stat = __scc_dma_end(drive);
        if (data_loss)
                dma_stat |= 2; /* emulate DMA error (to retry command) */
        return dma_stat;
}

/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

        /* SCC errata A252,A308 workaround: Step4 */
        if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
             & ERR_STAT) &&
            (int_stat & INTSTS_INTRQ))
                return 1;

        /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
        if (int_stat & INTSTS_IOIRQS)
                return 1;

        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
                       drive->name, __func__);
        return 0;
}

static u8 scc_udma_filter(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 mask = hwif->ultra_mask;

        /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
        if ((drive->media != ide_disk) && (mask & 0xE0)) {
                printk(KERN_INFO "%s: limit %s to UDMA4\n",
                       SCC_PATA_NAME, drive->name);
                mask = ATA_UDMA4;
        }

        return mask;
}

/**
 * setup_mmio_scc - map CTRL/BMID region
 * @dev: PCI device we are configuring
 * @name: device name
 *
 */

static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
        unsigned long ctl_base = pci_resource_start(dev, 0);
        unsigned long dma_base = pci_resource_start(dev, 1);
        unsigned long ctl_size = pci_resource_len(dev, 0);
        unsigned long dma_size = pci_resource_len(dev, 1);
        void __iomem *ctl_addr;
        void __iomem *dma_addr;
        int i, ret;

        for (i = 0; i < MAX_HWIFS; i++) {
                if (scc_ports[i].ctl == 0)
                        break;
        }
        if (i >= MAX_HWIFS)
                return -ENOMEM;

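        /* reserve BARs 0 and 1: BAR0 holds the CTRL registers, BAR1 the
         * BMID (bus master) registers */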
        ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
        if (ret < 0) {
                printk(KERN_ERR "%s: can't reserve resources\n", name);
                return ret;
        }

        if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
                goto fail_0;

        if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
                goto fail_1;

        pci_set_master(dev);
        scc_ports[i].ctl = (unsigned long)ctl_addr;
        scc_ports[i].dma = (unsigned long)dma_addr;
        pci_set_drvdata(dev, (void *) &scc_ports[i]);

        return 1;

 fail_1:
        iounmap(ctl_addr);
 fail_0:
        return -ENOMEM;
}

static int scc_ide_setup_pci_device(struct pci_dev *dev,
                                    const struct ide_port_info *d)
{
        struct scc_ports *ports = pci_get_drvdata(dev);
        ide_hwif_t *hwif = NULL;
        hw_regs_t hw;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        int i;

        hwif = ide_find_port();
        if (hwif == NULL) {
                printk(KERN_ERR "%s: too many IDE interfaces, "
                       "no room in table\n", SCC_PATA_NAME);
                return -ENOMEM;
        }

        memset(&hw, 0, sizeof(hw));
        for (i = 0; i <= 8; i++)
                hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
        hw.irq = dev->irq;
        hw.dev = &dev->dev;
        hw.chipset = ide_pci;
        ide_init_port_hw(hwif, &hw);

        idx[0] = hwif->index;

        ide_device_add(idx, d);

        return 0;
}

/**
 * init_setup_scc - set up an SCC PATA Controller
 * @dev: PCI device
 * @d: IDE port info
 *
 * Perform the initial set up for this device.
 */

static int __devinit init_setup_scc(struct pci_dev *dev,
                                    const struct ide_port_info *d)
{
        unsigned long ctl_base;
        unsigned long dma_base;
        unsigned long cckctrl_port;
        unsigned long intmask_port;
        unsigned long mode_port;
        unsigned long ecmode_port;
        unsigned long dma_status_port;
        u32 reg = 0;
        struct scc_ports *ports;
        int rc;

        rc = pci_enable_device(dev);
        if (rc)
                goto end;

        rc = setup_mmio_scc(dev, d->name);
        if (rc < 0)
                goto end;

        ports = pci_get_drvdata(dev);
        ctl_base = ports->ctl;
        dma_base = ports->dma;
        cckctrl_port = ctl_base + 0xff0;
        intmask_port = dma_base + 0x010;
        mode_port = ctl_base + 0x024;
        ecmode_port = ctl_base + 0xf00;
        dma_status_port = dma_base + 0x004;

        /* controller initialization */
        reg = 0;
        out_be32((void*)cckctrl_port, reg);
        reg |= CCKCTRL_ATACLKOEN;
        out_be32((void*)cckctrl_port, reg);
        reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
        out_be32((void*)cckctrl_port, reg);
        reg |= CCKCTRL_CRST;
        out_be32((void*)cckctrl_port, reg);

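        /* poll until the controller reflects the reset request (CRST reads
         * back as set) before touching the ATA reset bit */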
        for (;;) {
                reg = in_be32((void*)cckctrl_port);
                if (reg & CCKCTRL_CRST)
                        break;
                udelay(5000);
        }

        reg |= CCKCTRL_ATARESET;
        out_be32((void*)cckctrl_port, reg);

        out_be32((void*)ecmode_port, ECMODE_VALUE);
        out_be32((void*)mode_port, MODE_JCUSFEN);
        out_be32((void*)intmask_port, INTMASK_MSK);

        rc = scc_ide_setup_pci_device(dev, d);

 end:
        return rc;
}

static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
        struct ide_io_ports *io_ports = &drive->hwif->io_ports;
        struct ide_taskfile *tf = &task->tf;
        u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

        if (task->tf_flags & IDE_TFLAG_FLAGGED)
                HIHI = 0xFF;

        if (task->tf_flags & IDE_TFLAG_OUT_DATA)
                out_be32((void *)io_ports->data_addr,
                         (tf->hob_data << 8) | tf->data);

        if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
                scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
                scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
                scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
                scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
                scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

        if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
                scc_ide_outb(tf->feature, io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
                scc_ide_outb(tf->nsect, io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
                scc_ide_outb(tf->lbal, io_ports->lbal_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
                scc_ide_outb(tf->lbam, io_ports->lbam_addr);
        if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
                scc_ide_outb(tf->lbah, io_ports->lbah_addr);

        if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
                scc_ide_outb((tf->device & HIHI) | drive->select.all,
                             io_ports->device_addr);
}

static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
        struct ide_io_ports *io_ports = &drive->hwif->io_ports;
        struct ide_taskfile *tf = &task->tf;

        if (task->tf_flags & IDE_TFLAG_IN_DATA) {
                u16 data = (u16)in_be32((void *)io_ports->data_addr);

                tf->data = data & 0xff;
                tf->hob_data = (data >> 8) & 0xff;
        }

        /* be sure we're looking at the low order bits */
        scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

        if (task->tf_flags & IDE_TFLAG_IN_NSECT)
                tf->nsect = scc_ide_inb(io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAL)
                tf->lbal = scc_ide_inb(io_ports->lbal_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAM)
                tf->lbam = scc_ide_inb(io_ports->lbam_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAH)
                tf->lbah = scc_ide_inb(io_ports->lbah_addr);
        if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
                tf->device = scc_ide_inb(io_ports->device_addr);

        if (task->tf_flags & IDE_TFLAG_LBA48) {
                scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

                if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
                        tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
                        tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
                        tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
                        tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
                if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
                        tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
        }
}

static void scc_input_data(ide_drive_t *drive, struct request *rq,
                           void *buf, unsigned int len)
{
        unsigned long data_addr = drive->hwif->io_ports.data_addr;

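        /* round an odd byte count up: the data register is always accessed
         * a full 16-bit word at a time */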
        len++;

        if (drive->io_32bit) {
                scc_ide_insl(data_addr, buf, len / 4);

                if ((len & 3) >= 2)
                        scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
        } else
                scc_ide_insw(data_addr, buf, len / 2);
}

static void scc_output_data(ide_drive_t *drive, struct request *rq,
                            void *buf, unsigned int len)
{
        unsigned long data_addr = drive->hwif->io_ports.data_addr;

        len++;

        if (drive->io_32bit) {
                scc_ide_outsl(data_addr, buf, len / 4);

                if ((len & 3) >= 2)
                        scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
        } else
                scc_ide_outsw(data_addr, buf, len / 2);
}

/**
 * init_mmio_iops_scc - set up the iops for MMIO
 * @hwif: interface to set up
 *
 */

static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        struct scc_ports *ports = pci_get_drvdata(dev);
        unsigned long dma_base = ports->dma;

        ide_set_hwifdata(hwif, ports);

        hwif->tf_load = scc_tf_load;
        hwif->tf_read = scc_tf_read;

        hwif->input_data = scc_input_data;
        hwif->output_data = scc_output_data;

        hwif->INB = scc_ide_inb;
        hwif->OUTB = scc_ide_outb;
        hwif->OUTBSYNC = scc_ide_outbsync;

        hwif->dma_base = dma_base;
        hwif->config_data = ports->ctl;
}

/**
 * init_iops_scc - set up iops
 * @hwif: interface to set up
 *
 * Do the basic setup for the SCC hardware interface
 * and then do the MMIO setup.
 */

static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);

        hwif->hwif_data = NULL;
        if (pci_get_drvdata(dev) == NULL)
                return;
        init_mmio_iops_scc(hwif);
}

static u8 __devinit scc_cable_detect(ide_hwif_t *hwif)
{
        return ATA_CBL_PATA80;
}

/**
 * init_hwif_scc - set up hwif
 * @hwif: interface to set up
 *
 * We do the basic set up of the interface structure. The SCC
 * requires several custom handlers so we override the default
 * ide DMA handlers appropriately.
 */

static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
        struct scc_ports *ports = ide_get_hwifdata(hwif);

        ports->hwif = hwif;

        hwif->dma_command = hwif->dma_base;
        hwif->dma_status = hwif->dma_base + 0x04;

        /* PTERADD */
        out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

        if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
                hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
        else
                hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}

static const struct ide_port_ops scc_port_ops = {
        .set_pio_mode   = scc_set_pio_mode,
        .set_dma_mode   = scc_set_dma_mode,
        .udma_filter    = scc_udma_filter,
        .cable_detect   = scc_cable_detect,
};

static const struct ide_dma_ops scc_dma_ops = {
        .dma_host_set   = scc_dma_host_set,
        .dma_setup      = scc_dma_setup,
        .dma_exec_cmd   = ide_dma_exec_cmd,
        .dma_start      = scc_dma_start,
        .dma_end        = scc_dma_end,
        .dma_test_irq   = scc_dma_test_irq,
        .dma_lost_irq   = ide_dma_lost_irq,
        .dma_timeout    = ide_dma_timeout,
};

#define DECLARE_SCC_DEV(name_str)                       \
  {                                                     \
      .name             = name_str,                     \
      .init_iops        = init_iops_scc,                \
      .init_hwif        = init_hwif_scc,                \
      .port_ops         = &scc_port_ops,                \
      .dma_ops          = &scc_dma_ops,                 \
      .host_flags       = IDE_HFLAG_SINGLE,             \
      .pio_mask         = ATA_PIO4,                     \
  }

static const struct ide_port_info scc_chipsets[] __devinitdata = {
        /* 0 */ DECLARE_SCC_DEV("sccIDE"),
};

/**
 * scc_init_one - pci layer discovery entry
 * @dev: PCI device
 * @id: ident table entry
 *
 * Called by the PCI code when it finds an SCC PATA controller.
 * We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
        return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}

/**
 * scc_remove - pci layer remove entry
 * @dev: PCI device
 *
 * Called by the PCI code when it removes an SCC PATA controller.
 */

static void __devexit scc_remove(struct pci_dev *dev)
{
        struct scc_ports *ports = pci_get_drvdata(dev);
        ide_hwif_t *hwif = ports->hwif;

        if (hwif->dmatable_cpu) {
                pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
                                    hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }

        ide_unregister(hwif);

        iounmap((void*)ports->dma);
        iounmap((void*)ports->ctl);
        pci_release_selected_regions(dev, (1 << 2) - 1);
        memset(ports, 0, sizeof(*ports));
}

static const struct pci_device_id scc_pci_tbl[] = {
        { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver driver = {
        .name = "SCC IDE",
        .id_table = scc_pci_tbl,
        .probe = scc_init_one,
        .remove = scc_remove,
};

static int scc_ide_init(void)
{
        return ide_pci_register_driver(&driver);
}

module_init(scc_ide_init);
/* -- No exit code?
static void scc_ide_exit(void)
{
        ide_pci_unregister_driver(&driver);
}
module_exit(scc_ide_exit);
*/

MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");