[PATCH] sata_sil: convert to new probing mechanism and add hotplug support
drivers/scsi/sata_sil.c
/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"1.0"

enum {
	/*
	 * host flags
	 */
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_HOST_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3512		= 1,
	sil_3114		= 2,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void sil_post_set_mode (struct ata_port *ap);
static irqreturn_t sil_interrupt(int irq, void *dev_instance,
				 struct pt_regs *regs);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
	{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
	{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
	const char * product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120026AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST3200822AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template sil_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations sil_ops = {
	.port_disable		= ata_port_disable,
	.dev_config		= sil_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.post_set_mode		= sil_post_set_mode,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_mmio_data_xfer,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_handler		= sil_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	{ 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
	{ 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
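
/*
 * Added illustration, not part of the original driver: the TODO above notes
 * that these per-port offsets could probably be calculated rather than
 * tabulated.  A possible, untested sketch, derived purely from the pattern
 * visible in sil_port[]: for the taskfile block, ports 0/1 and 2/3 form
 * pairs 0x200 apart, with the odd port of each pair offset by 0x40.  The
 * helper name sil_calc_tf_offset() is hypothetical.
 */
#if 0	/* sketch only, not compiled */
static inline unsigned long sil_calc_tf_offset(unsigned int port_no)
{
	/* yields 0x80, 0xC0, 0x280, 0x2C0 for ports 0-3 */
	return 0x80 + ((port_no & 1) * 0x40) + ((port_no & 2) * 0x100);
}
#endif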

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down = 0;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

static void sil_post_set_mode (struct ata_port *ap)
{
	struct ata_host_set *host_set = ap->host_set;
	struct ata_device *dev;
	void __iomem *addr =
		host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
	u32 tmp, dev_mode[2];
	unsigned int i;

	for (i = 0; i < 2; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			dev_mode[i] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[i] = 1;	/* PIO3/4 */
		else
			dev_mode[i] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
}
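
/*
 * Added commentary, not from the original sources: in the xfer_mode
 * register programmed above, bits 1:0 select the transfer class for
 * device 0 and bits 5:4 do the same for device 1 (0 = PIO0/1/2,
 * 1 = PIO3/4, 2 = MDMA, 3 = UDMA; this driver never programs value 2).
 * For example, a UDMA-capable drive as device 0 with no device 1
 * attached ends up with bits 1:0 = 3 and bits 5:4 = 0.
 */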

static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
{
	unsigned long offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return 0;
}

static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
	if (mmio)
		return readl(mmio);
	return 0xffffffffU;
}

static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
	if (mmio)
		writel(val, mmio);
}

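/*
 * Added commentary, not part of the original file: sil_host_intr() below is
 * where the hotplug support hooks into the interrupt path.  sil_thaw()
 * enables SIL_SIEN_N (documented in the enum above as triggered by
 * SError.N), and the controller reflects that SATA interrupt in the
 * SIL_DMA_SATA_IRQ bit of the BMDMA2 register.  When the bit is set, the
 * handler records a hotplug event with ata_ehi_hotplugged() and freezes
 * the port so libata EH can take over.
 */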
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		ata_ehi_hotplugged(&ap->eh_info);
		goto freeze;
	}

	if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
		goto freeze;

	/* Check whether we are expecting an interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * in this state when ready to receive a CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for ATAPI devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_hsm_move(ap, qc, status, 0);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance,
				 struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	void __iomem *mmio_base = host_set->mmio_base;
	int handled = 0;
	int i;

	spin_lock(&host_set->lock);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];
		u32 bmdma2;

		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
			continue;

		bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		if (!(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host_set->mmio_base;
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host_set->mmio_base;
	u32 tmp;

	/* clear IRQ */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ */
	writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@ap: Port containing device to be examined
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image:
 *	a Seagate fixup and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted.
 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
 *	pessimistic fix for the following reasons...
 *	- There seems to be less info on it, only one device gleaned off the
 *	  Windows driver, maybe only one is affected.  More info would be
 *	  greatly appreciated.
 *	- But then again UDMA5 is hardly anything to complain about.
 */
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
{
	unsigned int n, quirks = 0;
	unsigned char model_num[41];

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
			       "(mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		ata_dev_printk(dev, KERN_INFO,
			       "applying Maxtor errata fix %s\n", model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	unsigned long base;
	void __iomem *mmio_base;
	int rc;
	unsigned int i;
	int pci_dev_busy = 0;
	u32 tmp;
	u8 cls;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = pci_dev_to_dev(pdev);
	probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
	probe_ent->sht = sil_port_info[ent->driver_data].sht;
	probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
	probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
	probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
	probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;

	mmio_base = pci_iomap(pdev, 5, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	probe_ent->mmio_base = mmio_base;

	base = (unsigned long) mmio_base;

	for (i = 0; i < probe_ent->n_ports; i++) {
		probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
		probe_ent->port[i].altstatus_addr =
		probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
		probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
		probe_ent->port[i].scr_addr = base + sil_port[i].scr;
		ata_std_ports(&probe_ent->port[i]);
	}

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < probe_ent->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");

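	/*
	 * Added note describing the code above: PCI_CACHE_LINE_SIZE is in
	 * 32-bit words, so e.g. a 64-byte cache line reads back as 16;
	 * 16 >> 3 is 2, plus one gives cls = 3, and writew(cls << 8 | cls)
	 * then stores 0x0303 (the same value in both bytes) into each
	 * port's fifo_cfg register.
	 */
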
	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (ent->driver_data == sil_3114) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}

static int __init sil_init(void)
{
	return pci_module_init(&sil_pci_driver);
}

static void __exit sil_exit(void)
{
	pci_unregister_driver(&sil_pci_driver);
}


module_init(sil_init);
module_exit(sil_exit);