drm/msm: split out msm_kms.h
drivers/gpu/drm/msm/msm_drv.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
};

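/*
 * Registers an MMU with the driver core: the mmu is stored in the next
 * free slot of priv->mmus[] and that slot index is returned (callers use
 * it as the per-MMU id when mapping buffers), or -EINVAL once all slots
 * are taken.
 */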
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

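/*
 * The vram= value is parsed with memparse(), so the usual size suffixes
 * are understood, e.g. msm.vram=16m on the kernel command line.
 */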
static char *vram;
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

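/*
 * Maps a platform device MEM resource, looked up by @name (or the first
 * MEM resource when @name is NULL).  Returns an ERR_PTR() on failure, so
 * callers must check the result with IS_ERR(), not against NULL.
 */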
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
	return val;
}


/*
 * DRM operations:
 */

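/*
 * msm_unload() also serves as the error path for msm_load() (via its
 * fail: label), so each teardown step below must tolerate partially
 * initialized state.
 */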
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}

static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);

	drm_mode_config_init(dev);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DEFINE_DMA_ATTRS(attrs);
		unsigned long size;
		void *p;

		DBG("using %s VRAM carveout", vram);
		size = memparse(vram, NULL);
		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			ret = -ENOMEM;
			goto fail;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	kms = mdp4_kms_init(dev);
	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	platform_set_drvdata(pdev, dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	priv->fbdev = msm_fbdev_init(dev);
#endif

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}

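/*
 * The GPU is brought up lazily, on first open (see msm_open() below),
 * to avoid requiring GPU firmware in the initrd; failure to probe it is
 * not fatal, KMS keeps working without acceleration.
 */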
static void load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu;

	if (priv->gpu)
		return;

	mutex_lock(&dev->struct_mutex);
	gpu = a3xx_gpu_init(dev);
	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to load a3xx gpu\n");
		gpu = NULL;
		/* not fatal */
	}
	mutex_unlock(&dev->struct_mutex);

	if (gpu) {
		int ret;
		gpu->funcs->pm_resume(gpu);
		ret = gpu->funcs->hw_init(gpu);
		if (ret) {
			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
			gpu->funcs->destroy(gpu);
			gpu = NULL;
		}
	}

	priv->gpu = gpu;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev) {
		drm_modeset_lock_all(dev);
		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
		drm_modeset_unlock_all(dev);
	}
}

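/*
 * The irq_* and *_vblank hooks below are thin shims that forward each
 * drm_driver callback to the KMS backend through msm_kms_funcs (see
 * msm_kms.h), keeping this file independent of the display hardware
 * generation.
 */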
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}

static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}

static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}

static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, dev->mm_private);
}

static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

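/*
 * Every debugfs entry is dumped through show_locked(), which serializes
 * against the rest of the driver by taking struct_mutex (interruptibly)
 * around the actual show function.
 */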
static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gpu", show_locked, 0, msm_gpu_show },
	{ "gem", show_locked, 0, msm_gem_show },
	{ "mm",  show_locked, 0, msm_mm_show },
	{ "fb",  show_locked, 0, msm_fb_show },
};

static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return ret;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
}
#endif

/*
 * Fences:
 */

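/*
 * Wait for a fence to be signalled.  A NULL timeout makes this a
 * non-blocking check (-EBUSY if the fence has not completed yet);
 * otherwise returns 0 on completion, -ETIMEDOUT on timeout, or
 * -ERESTARTSYS if interrupted by a signal.
 */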
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
		unsigned long start_jiffies = jiffies;
		unsigned long remaining_jiffies;

		if (time_after(start_jiffies, timeout_jiffies))
			remaining_jiffies = 0;
		else
			remaining_jiffies = timeout_jiffies - start_jiffies;

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

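/*
 * TS() converts the 64-bit timeout from the UAPI (struct
 * drm_msm_timespec) into a kernel struct timespec compound literal, so
 * the handlers below can pass it by reference.
 */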
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;
	return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
}

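/*
 * All ioctls require DRM authentication on the primary node but are
 * also exposed on render nodes (DRM_RENDER_ALLOW), and none of them
 * take the global DRM lock (DRM_UNLOCKED).
 */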
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_MODESET,
	.load = msm_load,
	.unload = msm_unload,
	.open = msm_open,
	.preclose = msm_preclose,
	.lastclose = msm_lastclose,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = msm_gem_prime_pin,
	.gem_prime_unpin = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap = msm_gem_prime_vmap,
	.gem_prime_vunmap = msm_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = DRM_MSM_NUM_IOCTLS,
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = 1,
	.minor = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return drm_platform_init(&msm_driver, pdev);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	drm_put_dev(platform_get_drvdata(pdev));

	return 0;
}

static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "msm",
		.pm = &msm_pm_ops,
	},
	.id_table = msm_id,
};

static int __init msm_drm_register(void)
{
	DBG("init");
	hdmi_register();
	a3xx_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	a3xx_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");