delkin_cb: add PM support
[deliverable/linux.git] / drivers / ide / pci / sgiioc4.c
CommitLineData
1da177e4 1/*
0271fc2d 2 * Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
aa95f0e7 3 * Copyright (C) 2008 MontaVista Software, Inc.
1da177e4
LT
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 *
1da177e4
LT
17 * For further information regarding this notice, see:
18 *
19 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
20 */
21
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
1da177e4
LT
26#include <linux/init.h>
27#include <linux/kernel.h>
1da177e4
LT
28#include <linux/ioport.h>
29#include <linux/blkdev.h>
55c16a70 30#include <linux/scatterlist.h>
22329b51 31#include <linux/ioc4.h>
1da177e4
LT
32#include <asm/io.h>
33
34#include <linux/ide.h>
35
ca1997c1
BZ
36#define DRV_NAME "SGIIOC4"
37
1da177e4
LT
38/* IOC4 Specific Definitions */
39#define IOC4_CMD_OFFSET 0x100
40#define IOC4_CTRL_OFFSET 0x120
41#define IOC4_DMA_OFFSET 0x140
42#define IOC4_INTR_OFFSET 0x0
43
44#define IOC4_TIMING 0x00
45#define IOC4_DMA_PTR_L 0x01
46#define IOC4_DMA_PTR_H 0x02
47#define IOC4_DMA_ADDR_L 0x03
48#define IOC4_DMA_ADDR_H 0x04
49#define IOC4_BC_DEV 0x05
50#define IOC4_BC_MEM 0x06
51#define IOC4_DMA_CTRL 0x07
52#define IOC4_DMA_END_ADDR 0x08
53
54/* Bits in the IOC4 Control/Status Register */
55#define IOC4_S_DMA_START 0x01
56#define IOC4_S_DMA_STOP 0x02
57#define IOC4_S_DMA_DIR 0x04
58#define IOC4_S_DMA_ACTIVE 0x08
59#define IOC4_S_DMA_ERROR 0x10
60#define IOC4_ATA_MEMERR 0x02
61
62/* Read/Write Directions */
63#define IOC4_DMA_WRITE 0x04
64#define IOC4_DMA_READ 0x00
65
66/* Interrupt Register Offsets */
67#define IOC4_INTR_REG 0x03
68#define IOC4_INTR_SET 0x05
69#define IOC4_INTR_CLEAR 0x07
70
71#define IOC4_IDE_CACHELINE_SIZE 128
72#define IOC4_CMD_CTL_BLK_SIZE 0x20
73#define IOC4_SUPPORTED_FIRMWARE_REV 46
74
75typedef struct {
76 u32 timing_reg0;
77 u32 timing_reg1;
78 u32 low_mem_ptr;
79 u32 high_mem_ptr;
80 u32 low_mem_addr;
81 u32 high_mem_addr;
82 u32 dev_byte_count;
83 u32 mem_byte_count;
84 u32 status;
85} ioc4_dma_regs_t;
86
87/* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */
88/* IOC4 has only 1 IDE channel */
89#define IOC4_PRD_BYTES 16
90#define IOC4_PRD_ENTRIES (PAGE_SIZE /(4*IOC4_PRD_BYTES))
91
92
93static void
94sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
95 unsigned long ctrl_port, unsigned long irq_port)
96{
97 unsigned long reg = data_port;
98 int i;
99
100 /* Registers are word (32 bit) aligned */
4c3032d8
BZ
101 for (i = 0; i <= 7; i++)
102 hw->io_ports_array[i] = reg + i * 4;
1da177e4
LT
103
104 if (ctrl_port)
4c3032d8 105 hw->io_ports.ctl_addr = ctrl_port;
1da177e4
LT
106
107 if (irq_port)
4c3032d8 108 hw->io_ports.irq_addr = irq_port;
1da177e4
LT
109}
110
1da177e4
LT
111static int
112sgiioc4_checkirq(ide_hwif_t * hwif)
113{
0ecdca26 114 unsigned long intr_addr =
4c3032d8 115 hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
1da177e4 116
0ecdca26 117 if ((u8)readl((void __iomem *)intr_addr) & 0x03)
1da177e4
LT
118 return 1;
119
120 return 0;
121}
122
b73c7ee2 123static u8 sgiioc4_read_status(ide_hwif_t *);
1da177e4
LT
124
/*
 * Clear a pending IOC4 IDE interrupt, logging and clearing any PCI bus
 * error that accompanied it.  Returns the low two bits of the interrupt
 * register as sampled on entry (non-zero iff an interrupt was pending).
 */
static int
sgiioc4_clearirq(ide_drive_t * drive)
{
	u32 intr_reg;
	ide_hwif_t *hwif = HWIF(drive);
	struct ide_io_ports *io_ports = &hwif->io_ports;
	unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);

	/* Code to check for PCI error conditions */
	intr_reg = readl((void __iomem *)other_ir);
	if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
		/*
		 * Using sgiioc4_read_status to read the Status register has a
		 * side effect of clearing the interrupt.  The first read should
		 * clear it if it is set.  The second read should return
		 * a "clear" status if it got cleared.  If not, then spin
		 * for a bit trying to clear it.
		 */
		u8 stat = sgiioc4_read_status(hwif);
		int count = 0;

		stat = sgiioc4_read_status(hwif);
		/* Spin (max ~100us) while the drive still reports busy */
		while ((stat & ATA_BUSY) && (count++ < 100)) {
			udelay(1);
			stat = sgiioc4_read_status(hwif);
		}

		if (intr_reg & 0x02) {
			struct pci_dev *dev = to_pci_dev(hwif->dev);
			/* Error when transferring DMA data on PCI bus */
			u32 pci_err_addr_low, pci_err_addr_high,
			    pci_stat_cmd_reg;

			/* The faulting PCI address is latched in the two
			 * words at the base of the interrupt register block */
			pci_err_addr_low =
				readl((void __iomem *)io_ports->irq_addr);
			pci_err_addr_high =
				readl((void __iomem *)(io_ports->irq_addr + 4));
			pci_read_config_dword(dev, PCI_COMMAND,
					      &pci_stat_cmd_reg);
			printk(KERN_ERR
			       "%s(%s) : PCI Bus Error when doing DMA:"
			       " status-cmd reg is 0x%x\n",
			       __func__, drive->name, pci_stat_cmd_reg);
			printk(KERN_ERR
			       "%s(%s) : PCI Error Address is 0x%x%x\n",
			       __func__, drive->name,
			       pci_err_addr_high, pci_err_addr_low);
			/* Clear the PCI Error indicator */
			pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
		}

		/* Clear the Interrupt, Error bits on the IOC4 */
		writel(0x03, (void __iomem *)other_ir);

		/* Re-read to post the write before returning */
		intr_reg = readl((void __iomem *)other_ir);
	}

	return intr_reg & 3;
}
184
5e37bdc0 185static void sgiioc4_dma_start(ide_drive_t *drive)
1da177e4
LT
186{
187 ide_hwif_t *hwif = HWIF(drive);
0ecdca26
BZ
188 unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
189 unsigned int reg = readl((void __iomem *)ioc4_dma_addr);
1da177e4
LT
190 unsigned int temp_reg = reg | IOC4_S_DMA_START;
191
0ecdca26 192 writel(temp_reg, (void __iomem *)ioc4_dma_addr);
1da177e4
LT
193}
194
195static u32
196sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
197{
0ecdca26 198 unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
1da177e4
LT
199 u32 ioc4_dma;
200 int count;
201
202 count = 0;
0ecdca26 203 ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
1da177e4
LT
204 while ((ioc4_dma & IOC4_S_DMA_STOP) && (count++ < 200)) {
205 udelay(1);
0ecdca26 206 ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
1da177e4
LT
207 }
208 return ioc4_dma;
209}
210
211/* Stops the IOC4 DMA Engine */
/*
 * Stops the IOC4 DMA Engine.
 *
 * Requests a stop, waits for it to take effect, then waits for the
 * hardware to stamp the "ending DMA" marker area that signals the data
 * actually reached memory.  Returns 0 on clean completion, 1 on error.
 */
static int sgiioc4_dma_end(ide_drive_t *drive)
{
	u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long dma_base = hwif->dma_base;
	int dma_stat = 0;
	/* Per-hwif completion-marker buffer set up by ide_dma_sgiioc4() */
	unsigned long *ending_dma = ide_get_hwifdata(hwif);

	writel(IOC4_S_DMA_STOP, (void __iomem *)(dma_base + IOC4_DMA_CTRL * 4));

	ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

	if (ioc4_dma & IOC4_S_DMA_STOP) {
		printk(KERN_ERR
		       "%s(%s): IOC4 DMA STOP bit is still 1 :"
		       "ioc4_dma_reg 0x%x\n",
		       __func__, drive->name, ioc4_dma);
		dma_stat = 1;
	}

	/*
	 * The IOC4 will DMA 1's to the ending dma area to indicate that
	 * previous data DMA is complete.  This is necessary because of relaxed
	 * ordering between register reads and DMA writes on the Altix.
	 */
	while ((cnt++ < 200) && (!valid)) {
		for (num = 0; num < 16; num++) {
			if (ending_dma[num]) {
				valid = 1;
				break;
			}
		}
		udelay(1);
	}
	if (!valid) {
		printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
		       drive->name);
		dma_stat = 1;
	}

	/* Compare device-side and memory-side byte counters; a mismatch
	 * beyond a small slop indicates a truncated transfer. */
	bc_dev = readl((void __iomem *)(dma_base + IOC4_BC_DEV * 4));
	bc_mem = readl((void __iomem *)(dma_base + IOC4_BC_MEM * 4));

	if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) {
		if (bc_dev > bc_mem + 8) {
			printk(KERN_ERR
			       "%s(%s): WARNING!! byte_count_dev %d "
			       "!= byte_count_mem %d\n",
			       __func__, drive->name, bc_dev, bc_mem);
		}
	}

	drive->waiting_for_dma = 0;
	ide_destroy_dmatable(drive);

	return dma_stat;
}
269
/*
 * Intentionally empty: the IOC4 port advertises a single DMA mode
 * (see sgiioc4_port_info.mwdma_mask = ATA_MWDMA2_ONLY), so there is
 * no per-drive timing to program when the mode is set.
 */
static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
}
273
1da177e4 274/* returns 1 if dma irq issued, 0 otherwise */
5e37bdc0 275static int sgiioc4_dma_test_irq(ide_drive_t *drive)
1da177e4
LT
276{
277 return sgiioc4_checkirq(HWIF(drive));
278}
279
15ce926a 280static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
1da177e4 281{
15ce926a
BZ
282 if (!on)
283 sgiioc4_clearirq(drive);
1da177e4
LT
284}
285
1da177e4
LT
/* Reset hook: stop any in-flight DMA, then clear pending IOC4 IRQs. */
static void
sgiioc4_resetproc(ide_drive_t * drive)
{
	sgiioc4_dma_end(drive);
	sgiioc4_clearirq(drive);
}
292
841d2a9b
SS
/* Lost-interrupt handler: reset the channel first, then let the
 * generic IDE lost-IRQ handler do its bookkeeping. */
static void
sgiioc4_dma_lost_irq(ide_drive_t * drive)
{
	sgiioc4_resetproc(drive);

	ide_dma_lost_irq(drive);
}
300
/*
 * Read the ATA Status register.  On IOC4, reading Status also serves to
 * acknowledge the IDE interrupt, so when the drive is not busy we
 * additionally clear the interrupt/error bits in the IOC4 interrupt
 * register (which in this mapping sits 0x110 below the status port).
 */
static u8 sgiioc4_read_status(ide_hwif_t *hwif)
{
	unsigned long port = hwif->io_ports.status_addr;
	u8 reg = (u8) readb((void __iomem *) port);

	if ((port & 0xFFF) == 0x11C) {	/* Status register of IOC4 */
		if (!(reg & ATA_BUSY)) { /* Not busy... check for interrupt */
			unsigned long other_ir = port - 0x110;
			unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);

			/* Clear the Interrupt, Error bits on the IOC4 */
			if (intr_reg & 0x03) {
				writel(0x03, (void __iomem *) other_ir);
				/* read back to post the write */
				intr_reg = (u32) readl((void __iomem *) other_ir);
			}
		}
	}

	return reg;
}
321
322/* Creates a dma map for the scatter-gather list entries */
ca1997c1 323static int __devinit
04216fa1 324ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
1da177e4 325{
36501650 326 struct pci_dev *dev = to_pci_dev(hwif->dev);
04216fa1 327 unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
1678df37 328 void __iomem *virt_dma_base;
1da177e4 329 int num_ports = sizeof (ioc4_dma_regs_t);
3f63c5e8 330 void *pad;
1da177e4 331
04216fa1
BZ
332 if (dma_base == 0)
333 return -1;
334
9b5a18e1 335 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
1da177e4 336
9b5a18e1
SS
337 if (request_mem_region(dma_base, num_ports, hwif->name) == NULL) {
338 printk(KERN_ERR "%s(%s) -- ERROR: addresses 0x%08lx to 0x%08lx "
339 "already in use\n", __func__, hwif->name,
340 dma_base, dma_base + num_ports - 1);
ca1997c1 341 return -1;
1da177e4
LT
342 }
343
1678df37
JK
344 virt_dma_base = ioremap(dma_base, num_ports);
345 if (virt_dma_base == NULL) {
9b5a18e1
SS
346 printk(KERN_ERR "%s(%s) -- ERROR: unable to map addresses "
347 "0x%lx to 0x%lx\n", __func__, hwif->name,
348 dma_base, dma_base + num_ports - 1);
1678df37
JK
349 goto dma_remap_failure;
350 }
351 hwif->dma_base = (unsigned long) virt_dma_base;
352
2bbd57ca 353 hwif->sg_max_nents = IOC4_PRD_ENTRIES;
1da177e4 354
2bbd57ca
BZ
355 hwif->prd_max_nents = IOC4_PRD_ENTRIES;
356 hwif->prd_ent_size = IOC4_PRD_BYTES;
1da177e4 357
2bbd57ca
BZ
358 if (ide_allocate_dma_engine(hwif))
359 goto dma_pci_alloc_failure;
1da177e4 360
36501650 361 pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
912ef6d9 362 (dma_addr_t *)&hwif->extra_base);
3f63c5e8
SS
363 if (pad) {
364 ide_set_hwifdata(hwif, pad);
ca1997c1 365 return 0;
3f63c5e8 366 }
1da177e4 367
2bbd57ca
BZ
368 ide_release_dma_engine(hwif);
369
9b5a18e1 370 printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
eb63963a 371 __func__, hwif->name);
9b5a18e1 372 printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
1da177e4 373
1678df37
JK
374dma_pci_alloc_failure:
375 iounmap(virt_dma_base);
376
377dma_remap_failure:
378 release_mem_region(dma_base, num_ports);
379
ca1997c1 380 return -1;
1da177e4
LT
381}
382
/* Initializes the IOC4 DMA Engine */
/*
 * Program the DMA engine for one transfer: sanity-check (and forcibly
 * stop) any leftover activity or error state, load the scatter-gather
 * list address and the ending-DMA marker address, then write the
 * direction.  The transfer itself is started later by
 * sgiioc4_dma_start() setting IOC4_S_DMA_START.
 */
static void
sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
{
	u32 ioc4_dma;
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long dma_base = hwif->dma_base;
	unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
	u32 dma_addr, ending_dma_addr;

	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);

	/* A previous transfer should never still be active here; if it
	 * is, force a stop before reprogramming the engine. */
	if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
		printk(KERN_WARNING
			"%s(%s):Warning!! DMA from previous transfer was still active\n",
		       __func__, drive->name);
		writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "%s(%s) : IOC4 Dma STOP bit is still 1\n",
			       __func__, drive->name);
	}

	/* Likewise clear any latched error from the previous transfer */
	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
	if (ioc4_dma & IOC4_S_DMA_ERROR) {
		printk(KERN_WARNING
		       "%s(%s) : Warning!! - DMA Error during Previous"
		       " transfer | status 0x%x\n",
		       __func__, drive->name, ioc4_dma);
		writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "%s(%s) : IOC4 DMA STOP bit is still 1\n",
			       __func__, drive->name);
	}

	/* Address of the Scatter Gather List */
	dma_addr = cpu_to_le32(hwif->dmatable_dma);
	writel(dma_addr, (void __iomem *)(dma_base + IOC4_DMA_PTR_L * 4));

	/* Address of the Ending DMA - zero the marker buffer first so
	 * sgiioc4_dma_end() can detect the hardware's completion stamp */
	memset(ide_get_hwifdata(hwif), 0, IOC4_IDE_CACHELINE_SIZE);
	ending_dma_addr = cpu_to_le32(hwif->extra_base);
	writel(ending_dma_addr, (void __iomem *)(dma_base + IOC4_DMA_END_ADDR * 4));

	writel(dma_direction, (void __iomem *)ioc4_dma_addr);
	drive->waiting_for_dma = 1;
}
435
/* IOC4 Scatter Gather list Format */
/* 128 Bit entries to support 64 bit addresses in the future */
/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero | Lower 32 bits- address | */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero |EOL| 15 unused | 16 Bit Length| */
/* --------------------------------------------------------------------- */
/* Creates the scatter gather list, DMA Table.
 * Returns the number of table entries built, or 0 to fall back to PIO. */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = 1;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;	/* sglist of length Zero */

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				/* Chunks may not cross a 64 KB boundary:
				 * the length field is only 16 bits wide */
				u32 bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/* put the addr, length in
				 * the IOC4 dma-table format */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				*table = cpu_to_be32(bcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		/* Flag end-of-list (EOL, bit 31) on the last length word */
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0;	/* revert to PIO for this request */
}
510
5e37bdc0 511static int sgiioc4_dma_setup(ide_drive_t *drive)
1da177e4
LT
512{
513 struct request *rq = HWGROUP(drive)->rq;
514 unsigned int count = 0;
515 int ddir;
516
517 if (rq_data_dir(rq))
518 ddir = PCI_DMA_TODEVICE;
519 else
520 ddir = PCI_DMA_FROMDEVICE;
521
522 if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
523 /* try PIO instead of DMA */
524 ide_map_sg(drive, rq);
525 return 1;
526 }
527
528 if (rq_data_dir(rq))
529 /* Writes TO the IOC4 FROM Main Memory */
530 ddir = IOC4_DMA_READ;
531 else
532 /* Writes FROM the IOC4 TO Main Memory */
533 ddir = IOC4_DMA_WRITE;
534
535 sgiioc4_configure_for_dma(ddir, drive);
536
537 return 0;
538}
539
374e042c
BZ
/*
 * Taskfile-protocol hooks.  Everything is the generic implementation
 * except read_status, which must go through sgiioc4_read_status() so
 * the IOC4 interrupt is acknowledged as a side effect of the read.
 */
static const struct ide_tp_ops sgiioc4_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= sgiioc4_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
554
ac95beed
BZ
/* Port operations: set_dma_mode is a no-op (single supported mode). */
static const struct ide_port_ops sgiioc4_port_ops = {
	.set_dma_mode		= sgiioc4_set_dma_mode,
	/* reset DMA engine, clear IRQs */
	.resetproc		= sgiioc4_resetproc,
};
560
/* DMA operations wired to the IOC4-specific engine handlers above;
 * only dma_timeout falls through to the generic implementation. */
static const struct ide_dma_ops sgiioc4_dma_ops = {
	.dma_host_set		= sgiioc4_dma_host_set,
	.dma_setup		= sgiioc4_dma_setup,
	.dma_start		= sgiioc4_dma_start,
	.dma_end		= sgiioc4_dma_end,
	.dma_test_irq		= sgiioc4_dma_test_irq,
	.dma_lost_irq		= sgiioc4_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
};
570
/* Template port description: MMIO host, MWDMA2 as the only DMA mode. */
static const struct ide_port_info sgiioc4_port_info __devinitdata = {
	.name			= DRV_NAME,
	.chipset		= ide_pci,
	.init_dma		= ide_dma_sgiioc4,
	.tp_ops			= &sgiioc4_tp_ops,
	.port_ops		= &sgiioc4_port_ops,
	.dma_ops		= &sgiioc4_dma_ops,
	.host_flags		= IDE_HFLAG_MMIO,
	.mwdma_mask		= ATA_MWDMA2_ONLY,
};
581
1da177e4 582static int __devinit
ca1997c1 583sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
1da177e4 584{
04216fa1 585 unsigned long cmd_base, irqport;
1678df37
JK
586 unsigned long bar0, cmd_phys_base, ctl;
587 void __iomem *virt_base;
48c3c107 588 struct ide_host *host;
c97c6aca 589 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
c413b9b9 590 struct ide_port_info d = sgiioc4_port_info;
8a69580e 591 int rc;
1da177e4 592
1da177e4 593 /* Get the CmdBlk and CtrlBlk Base Registers */
1678df37
JK
594 bar0 = pci_resource_start(dev, 0);
595 virt_base = ioremap(bar0, pci_resource_len(dev, 0));
596 if (virt_base == NULL) {
597 printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
ca1997c1 598 DRV_NAME, bar0);
1678df37
JK
599 return -ENOMEM;
600 }
601 cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
602 ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
603 irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
1da177e4 604
1678df37 605 cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
9b5a18e1
SS
606 if (request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
607 DRV_NAME) == NULL) {
608 printk(KERN_ERR "%s %s -- ERROR: addresses 0x%08lx to 0x%08lx "
609 "already in use\n", DRV_NAME, pci_name(dev),
610 cmd_phys_base, cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
611 return -EBUSY;
1da177e4
LT
612 }
613
8f8e8483
BZ
614 /* Initialize the IO registers */
615 memset(&hw, 0, sizeof(hw));
616 sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
57c802e8
BZ
617 hw.irq = dev->irq;
618 hw.chipset = ide_pci;
619 hw.dev = &dev->dev;
ce30e401 620
1da177e4 621 /* Initializing chipset IRQ Registers */
0ecdca26 622 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
1da177e4 623
48c3c107 624 host = ide_host_alloc(&d, hws);
8a69580e
BZ
625 if (host == NULL) {
626 rc = -ENOMEM;
48c3c107 627 goto err;
8a69580e 628 }
1da177e4 629
8a69580e
BZ
630 rc = ide_host_register(host, &d, hws);
631 if (rc)
632 goto err_free;
1da177e4
LT
633
634 return 0;
8a69580e
BZ
635err_free:
636 ide_host_free(host);
ce30e401
BZ
637err:
638 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
639 iounmap(virt_base);
8a69580e 640 return rc;
1da177e4
LT
641}
642
643static unsigned int __devinit
ca1997c1 644pci_init_sgiioc4(struct pci_dev *dev)
1da177e4 645{
1da177e4
LT
646 int ret;
647
1da177e4 648 printk(KERN_INFO "%s: IDE controller at PCI slot %s, revision %d\n",
fc212bb1
BZ
649 DRV_NAME, pci_name(dev), dev->revision);
650
651 if (dev->revision < IOC4_SUPPORTED_FIRMWARE_REV) {
1da177e4 652 printk(KERN_ERR "Skipping %s IDE controller in slot %s: "
ca1997c1
BZ
653 "firmware is obsolete - please upgrade to "
654 "revision46 or higher\n",
655 DRV_NAME, pci_name(dev));
1da177e4
LT
656 ret = -EAGAIN;
657 goto out;
658 }
ca1997c1 659 ret = sgiioc4_ide_setup_pci_device(dev);
1da177e4
LT
660out:
661 return ret;
662}
663
/*
 * IOC4 submodule probe entry point, called by the IOC4 core driver for
 * each IOC4 it discovers.  Returns 0 (skipped) or the probe result.
 */
int
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
	/* PCI-RT does not bring out IDE connection.
	 * Do not attach to this particular IOC4.
	 */
	if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
		return 0;

	return pci_init_sgiioc4(idd->idd_pdev);
}
675
22329b51
BC
/* Hooks this IDE driver into the IOC4 core driver's probe sequence.
 * No remove hook is wired up (see commented-out .is_remove below). */
static struct ioc4_submodule ioc4_ide_submodule = {
	.is_name = "IOC4_ide",
	.is_owner = THIS_MODULE,
	.is_probe = ioc4_ide_attach_one,
/*	.is_remove = ioc4_ide_remove_one, */
};
682
/* Module init: register with the IOC4 core so ioc4_ide_attach_one()
 * gets called for each IOC4 device. */
static int __init ioc4_ide_init(void)
{
	return ioc4_register_submodule(&ioc4_ide_submodule);
}

late_initcall(ioc4_ide_init); /* Call only after IDE init is done */

MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon");
MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
This page took 0.501817 seconds and 5 git commands to generate.