/*
 * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
 * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
 */

/*
 * Core code for the Via multifunction framebuffer device.
 */
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
#include "global.h"

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
/*
 * The default port config.
 */
static struct via_port_cfg adap_configs[] = {
	[VIA_PORT_26]	= { VIA_PORT_I2C,  VIA_MODE_OFF,  VIASR, 0x26 },
	[VIA_PORT_31]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C]	= { VIA_PORT_GPIO, VIA_MODE_I2C,  VIASR, 0x2c },
	[VIA_PORT_3D]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
};
/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.
 */
static struct viafb_dev global_dev;


/*
 * Basic register access; spinlock required.
 */
static inline void viafb_mmio_write(int reg, u32 v)
{
	iowrite32(v, global_dev.engine_mmio + reg);
}

static inline int viafb_mmio_read(int reg)
{
	return ioread32(global_dev.engine_mmio + reg);
}
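
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to honor the "spinlock required" rule above when doing a
 * read-modify-write of an engine register.  via_example_set_bits() is a
 * hypothetical helper that exists only for this example.
 */
static inline void via_example_set_bits(int reg, u32 bits)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&global_dev.reg_lock, flags);
	val = viafb_mmio_read(reg);
	viafb_mmio_write(reg, val | bits);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
}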
/* ---------------------------------------------------------------------- */

/*
 * Interrupt management.  We have a single IRQ line for a lot of
 * different functions, so we need to share it.  The design here
 * is that we don't want to reimplement the shared IRQ code here;
 * we also want to avoid having contention for a single handler thread.
 * So each subdev driver which needs interrupts just requests
 * them directly from the kernel.  We just have what's needed for
 * overall access to the interrupt control register.
 */

/*
 * Which interrupts are enabled now?
 */
static u32 viafb_enabled_ints;
static void viafb_int_init(void)
{
	viafb_enabled_ints = 0;

	viafb_mmio_write(VDE_INTERRUPT, 0);
}
/*
 * Allow subdevs to ask for specific interrupts to be enabled.  These
 * functions must be called with reg_lock held.
 */
void viafb_irq_enable(u32 mask)
{
	viafb_enabled_ints |= mask;
	viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);

void viafb_irq_disable(u32 mask)
{
	viafb_enabled_ints &= ~mask;
	if (viafb_enabled_ints == 0)
		viafb_mmio_write(VDE_INTERRUPT, 0);	/* Disable entirely */
	else
		viafb_mmio_write(VDE_INTERRUPT,
				viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_disable);
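
/*
 * Illustrative sketch (not part of the original file): how a subdev
 * driver might plug into the shared interrupt line described above.
 * The handler, the "via-example" name and the my_int_mask argument are
 * hypothetical stand-ins; request_irq(), viafb_irq_enable() and the
 * viafb_dev fields are the real interfaces involved.
 */
static irqreturn_t via_example_subdev_irq(int irq, void *data)
{
	/* A shared handler must check whether its own device really fired. */
	return IRQ_NONE;
}

static int via_example_subdev_init(struct viafb_dev *vdev, u32 my_int_mask)
{
	unsigned long flags;
	int ret;

	/* Share the GPU interrupt line with the other subdevs. */
	ret = request_irq(vdev->pdev->irq, via_example_subdev_irq,
			  IRQF_SHARED, "via-example", vdev);
	if (ret)
		return ret;
	/* Then enable only the bits this subdev cares about, under reg_lock. */
	spin_lock_irqsave(&vdev->reg_lock, flags);
	viafb_irq_enable(my_int_mask);
	spin_unlock_irqrestore(&vdev->reg_lock, flags);
	return 0;
}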
/* ---------------------------------------------------------------------- */

/*
 * Access to the DMA engine.  This currently provides what the camera
 * driver needs (i.e. outgoing only) but is easily expandable if need
 * be.
 */

/*
 * There are four DMA channels in the vx855.  For now, we only
 * use one of them, though.  Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
static int viafb_dma_users;
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);
/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr */
	u32	next_desc_high;	/* High 12 bits of next descriptor addr */
	u32	pad;		/* Fill out to 64 bytes */
};

/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC		0x01	/* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02	/* Final segment */
/*
 * The completion IRQ handler.
 */
static irqreturn_t viafb_dma_irq(int irq, void *data)
{
	int csr;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&global_dev.reg_lock);
	csr = viafb_mmio_read(VDMA_CSR0);
	if (csr & VDMA_C_DONE) {
		viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
		complete(&viafb_dma_completion);
		ret = IRQ_HANDLED;
	}
	spin_unlock(&global_dev.reg_lock);
	return ret;
}
/*
 * Indicate a need for DMA functionality.
 */
int viafb_request_dma(void)
{
	int ret = 0;

	/*
	 * Only VX855 is supported currently.
	 */
	if (global_dev.chip_type != UNICHROME_VX855)
		return -ENODEV;
	/*
	 * Note the new user and set up our interrupt handler
	 * if need be.
	 */
	mutex_lock(&viafb_dma_lock);
	viafb_dma_users++;
	if (viafb_dma_users == 1) {
		ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
				IRQF_SHARED, "via-dma", &viafb_dma_users);
		if (ret)
			viafb_dma_users--;
		else
			viafb_irq_enable(VDE_I_DMA0TDEN);
	}
	mutex_unlock(&viafb_dma_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(viafb_request_dma);
void viafb_release_dma(void)
{
	mutex_lock(&viafb_dma_lock);
	viafb_dma_users--;
	if (viafb_dma_users == 0) {
		viafb_irq_disable(VDE_I_DMA0TDEN);
		free_irq(global_dev.pdev->irq, &viafb_dma_users);
	}
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_release_dma);
/*
 * Copy a single buffer from FB memory, synchronously.  This code works
 * but is not currently used.
 */
void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
{
	unsigned long flags;
	int csr;

	mutex_lock(&viafb_dma_lock);
	init_completion(&viafb_dma_completion);
	/*
	 * Program the controller.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	/* Enable ints; must happen after CSR0 write! */
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
	viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
	viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
	/* Data sheet suggests DAR0 should be <<4, but it lies */
	viafb_mmio_write(VDMA_DAR0, offset);
	viafb_mmio_write(VDMA_DQWCR0, len >> 4);
	viafb_mmio_write(VDMA_TMR0, 0);
	viafb_mmio_write(VDMA_DPRL0, 0);
	viafb_mmio_write(VDMA_DPRH0, 0);
	viafb_mmio_write(VDMA_PMR0, 0);
	csr = viafb_mmio_read(VDMA_CSR0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.
	 */
	wait_for_completion_interruptible(&viafb_dma_completion);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
/*
 * Do a scatter/gather DMA copy from FB memory.  You must have done
 * a successful call to viafb_request_dma() first.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
	struct viafb_vx855_dma_descr *descr;
	void *descrpages;
	dma_addr_t descr_handle;
	unsigned long flags;
	int i;
	struct scatterlist *sgentry;
	dma_addr_t nextdesc;

	/*
	 * Get a place to put the descriptors.
	 */
	descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr),
			&descr_handle, GFP_KERNEL);
	if (descrpages == NULL) {
		dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
		return -ENOMEM;
	}
	mutex_lock(&viafb_dma_lock);
	/*
	 * Fill them in.
	 */
	descr = descrpages;
	nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
	for_each_sg(sg, sgentry, nsg, i) {
		dma_addr_t paddr = sg_dma_address(sgentry);
		descr->addr_low = paddr & 0xfffffff0;
		descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
		descr->fb_offset = offset;
		descr->seg_size = sg_dma_len(sgentry) >> 4;
		descr->tile_mode = 0;
		descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
		descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
		descr->pad = 0xffffffff;  /* VIA driver does this */
		offset += sg_dma_len(sgentry);
		nextdesc += sizeof(struct viafb_vx855_dma_descr);
		descr++;
	}
	descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
	/*
	 * Program the engine.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	init_completion(&viafb_dma_completion);
	viafb_mmio_write(VDMA_DQWCR0, 0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
	viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
	viafb_mmio_write(VDMA_DPRH0,
			(((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
	(void) viafb_mmio_read(VDMA_CSR0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.  Except that, actually, we need to wait a little
	 * longer: the interrupts seem to jump the gun a little and we
	 * get corrupted frames sometimes.
	 */
	wait_for_completion_timeout(&viafb_dma_completion, 1);
	if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
		printk(KERN_ERR "VIA DMA timeout!\n");
	/*
	 * Clean up and we're done.
	 */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
	dma_free_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
			descr_handle);
	return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
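
/*
 * Illustrative sketch (not part of the original file): the calling
 * sequence a DMA user such as the camera driver is expected to follow.
 * The sg list, nsg count and fb_offset argument are hypothetical, and
 * the sg list is assumed to be DMA-mapped already by the caller; the
 * viafb_* calls are the exported API defined above.
 */
static int via_example_grab_frame(struct scatterlist *sg, int nsg,
				  unsigned int fb_offset)
{
	int ret;

	ret = viafb_request_dma();	/* registers the shared DMA IRQ */
	if (ret)
		return ret;
	/* Copy one frame out of framebuffer memory into the sg list. */
	ret = viafb_dma_copy_out_sg(fb_offset, sg, nsg);
	viafb_release_dma();		/* drop our interest in the engine */
	return ret;
}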
/* ---------------------------------------------------------------------- */

/*
 * Figure out how big our framebuffer memory is.  Kind of ugly,
 * but evidently we can't trust the information found in the
 * fbdev configuration area.
 */
static u16 via_function3[] = {
	CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
	CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
	P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
};
/* Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset */
static int viafb_get_fb_size_from_pci(int chip_type)
{
	int i;
	u8 offset = 0;
	u32 FBSize;
	u32 VideoMemSize;

	/* search for the "FUNCTION3" device in this chipset */
	for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
		struct pci_dev *pdev;

		pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
				      NULL);
		if (!pdev)
			continue;

		DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);

		switch (pdev->device) {
		case CLE266_FUNCTION3:
		case KM400_FUNCTION3:
			offset = 0xE0;
			break;
		case CN400_FUNCTION3:
		case CN700_FUNCTION3:
		case CX700_FUNCTION3:
		case KM800_FUNCTION3:
		case KM890_FUNCTION3:
		case P4M890_FUNCTION3:
		case P4M900_FUNCTION3:
		case VX800_FUNCTION3:
		case VX855_FUNCTION3:
		/*case CN750_FUNCTION3: */
			offset = 0xA0;
			break;
		}

		if (!offset)
			break;

		pci_read_config_dword(pdev, offset, &FBSize);
		pci_dev_put(pdev);
	}

	if (!offset) {
		printk(KERN_ERR "cannot determine framebuffer size\n");
		return -1;
	}

	FBSize = FBSize & 0x00007000;
	DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);

	if (chip_type < UNICHROME_CX700) {
		switch (FBSize) {
		case 0x00004000:
			VideoMemSize = (16 << 20);	/*16M */
			break;

		case 0x00005000:
			VideoMemSize = (32 << 20);	/*32M */
			break;

		case 0x00006000:
			VideoMemSize = (64 << 20);	/*64M */
			break;

		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	} else {
		switch (FBSize) {
		case 0x00001000:
			VideoMemSize = (8 << 20);	/*8M */
			break;

		case 0x00002000:
			VideoMemSize = (16 << 20);	/*16M */
			break;

		case 0x00003000:
			VideoMemSize = (32 << 20);	/*32M */
			break;

		case 0x00004000:
			VideoMemSize = (64 << 20);	/*64M */
			break;

		case 0x00005000:
			VideoMemSize = (128 << 20);	/*128M */
			break;

		case 0x00006000:
			VideoMemSize = (256 << 20);	/*256M */
			break;

		case 0x00007000:	/* Only on VX855/875 */
			VideoMemSize = (512 << 20);	/*512M */
			break;

		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	}

	return VideoMemSize;
}
/*
 * Figure out and map our MMIO regions.
 */
static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
{
	int ret;

	/*
	 * Hook up to the device registers.  Note that we soldier
	 * on if it fails; the framebuffer can operate (without
	 * acceleration) without this region.
	 */
	vdev->engine_start = pci_resource_start(vdev->pdev, 1);
	vdev->engine_len = pci_resource_len(vdev->pdev, 1);
	vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
			vdev->engine_len);
	if (vdev->engine_mmio == NULL)
		dev_err(&vdev->pdev->dev,
				"Unable to map engine MMIO; operation will be "
				"slow and crippled.\n");
	/*
	 * Map in framebuffer memory.  For now, failure here is
	 * fatal.  Unfortunately, in the absence of significant
	 * vmalloc space, failure here is also entirely plausible.
	 * Eventually we want to move away from mapping this
	 * entire region.
	 */
	vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
	ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
	if (ret < 0)
		goto out_unmap;
	vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
	if (vdev->fbmem == NULL) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	return 0;
out_unmap:
	iounmap(vdev->engine_mmio);
	return ret;
}
static void __devexit via_pci_teardown_mmio(struct viafb_dev *vdev)
{
	iounmap(vdev->fbmem);
	iounmap(vdev->engine_mmio);
}
/*
 * Create our subsidiary devices.
 */
static struct viafb_subdev_info {
	char *name;
	struct platform_device *platdev;
} viafb_subdevs[] = {
	{
		.name = "viafb-gpio",
	},
	{
		.name = "viafb-i2c",
	},
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
static int __devinit via_create_subdev(struct viafb_dev *vdev,
		struct viafb_subdev_info *info)
{
	int ret;

	info->platdev = platform_device_alloc(info->name, -1);
	if (!info->platdev) {
		dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
			info->name);
		return -ENOMEM;
	}
	info->platdev->dev.parent = &vdev->pdev->dev;
	info->platdev->dev.platform_data = vdev;
	ret = platform_device_add(info->platdev);
	if (ret) {
		dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
			info->name);
		platform_device_put(info->platdev);
		info->platdev = NULL;
	}
	return ret;
}
static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
{
	int i;

	/*
	 * Ignore return values.  Even if some of the devices
	 * fail to be created, we'll still be able to use some
	 * of the rest.
	 */
	for (i = 0; i < N_SUBDEVS; i++)
		via_create_subdev(vdev, viafb_subdevs + i);
	return 0;
}
static void __devexit via_teardown_subdevs(void)
{
	int i;

	for (i = 0; i < N_SUBDEVS; i++)
		if (viafb_subdevs[i].platdev) {
			viafb_subdevs[i].platdev->dev.platform_data = NULL;
			platform_device_unregister(viafb_subdevs[i].platdev);
		}
}
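
/*
 * Illustrative sketch (not part of the original file): the subdev side
 * of the platform_data arrangement set up in via_create_subdev().  A
 * subdev's probe routine can recover the shared viafb_dev this way;
 * the function name and the printed message are hypothetical.
 */
static int __devinit via_example_subdev_probe(struct platform_device *platdev)
{
	struct viafb_dev *vdev = platdev->dev.platform_data;

	/* vdev carries engine_mmio, fbmem, reg_lock, port_cfg, ... */
	dev_info(&platdev->dev, "probed with chip type %d\n", vdev->chip_type);
	return 0;
}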
static int __devinit via_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	/*
	 * Global device initialization.
	 */
	memset(&global_dev, 0, sizeof(global_dev));
	global_dev.pdev = pdev;
	global_dev.chip_type = ent->driver_data;
	global_dev.port_cfg = adap_configs;
	spin_lock_init(&global_dev.reg_lock);
	ret = via_pci_setup_mmio(&global_dev);
	if (ret)
		goto out_disable;
	/*
	 * Set up interrupts and create our subdevices.  Continue even if
	 * some things fail.
	 */
	viafb_int_init();
	via_setup_subdevs(&global_dev);
	/*
	 * Set up the framebuffer device
	 */
	ret = via_fb_pci_probe(&global_dev);
	if (ret)
		goto out_subdevs;
	return 0;

out_subdevs:
	via_teardown_subdevs();
	via_pci_teardown_mmio(&global_dev);
out_disable:
	pci_disable_device(pdev);
	return ret;
}

static void __devexit via_pci_remove(struct pci_dev *pdev)
{
	via_teardown_subdevs();
	via_fb_pci_remove(pdev);
	via_pci_teardown_mmio(&global_dev);
	pci_disable_device(pdev);
}
static struct pci_device_id via_pci_table[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
	  .driver_data = UNICHROME_CLE266 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
	  .driver_data = UNICHROME_PM800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
	  .driver_data = UNICHROME_K400 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
	  .driver_data = UNICHROME_K800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
	  .driver_data = UNICHROME_CN700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
	  .driver_data = UNICHROME_K8M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
	  .driver_data = UNICHROME_CX700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
	  .driver_data = UNICHROME_P4M900 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
	  .driver_data = UNICHROME_CN750 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
	  .driver_data = UNICHROME_VX800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
	  .driver_data = UNICHROME_VX855 },
	{ }
};
MODULE_DEVICE_TABLE(pci, via_pci_table);
static struct pci_driver via_driver = {
	.name		= "viafb",
	.id_table	= via_pci_table,
	.probe		= via_pci_probe,
	.remove		= __devexit_p(via_pci_remove),
};

static int __init via_core_init(void)
{
	int ret;

	ret = viafb_init();
	if (ret)
		return ret;
	viafb_i2c_init();
	viafb_gpio_init();
	return pci_register_driver(&via_driver);
}

static void __exit via_core_exit(void)
{
	pci_unregister_driver(&via_driver);
	viafb_gpio_exit();
	viafb_i2c_exit();
	viafb_exit();
}

module_init(via_core_init);
module_exit(via_core_exit);