/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
/* Identification strings reported through the DRM core (drm_driver fields). */
#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0
41 struct exynos_atomic_commit
{
42 struct work_struct work
;
43 struct drm_device
*dev
;
44 struct drm_atomic_state
*state
;
48 static void exynos_atomic_wait_for_commit(struct drm_atomic_state
*state
)
50 struct drm_crtc_state
*crtc_state
;
51 struct drm_crtc
*crtc
;
54 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
55 struct exynos_drm_crtc
*exynos_crtc
= to_exynos_crtc(crtc
);
57 if (!crtc
->state
->enable
)
60 ret
= drm_crtc_vblank_get(crtc
);
64 exynos_drm_crtc_wait_pending_update(exynos_crtc
);
65 drm_crtc_vblank_put(crtc
);
69 static void exynos_atomic_commit_complete(struct exynos_atomic_commit
*commit
)
71 struct drm_device
*dev
= commit
->dev
;
72 struct exynos_drm_private
*priv
= dev
->dev_private
;
73 struct drm_atomic_state
*state
= commit
->state
;
74 struct drm_plane
*plane
;
75 struct drm_crtc
*crtc
;
76 struct drm_plane_state
*plane_state
;
77 struct drm_crtc_state
*crtc_state
;
80 drm_atomic_helper_commit_modeset_disables(dev
, state
);
82 drm_atomic_helper_commit_modeset_enables(dev
, state
);
85 * Exynos can't update planes with CRTCs and encoders disabled,
86 * its updates routines, specially for FIMD, requires the clocks
87 * to be enabled. So it is necessary to handle the modeset operations
88 * *before* the commit_planes() step, this way it will always
89 * have the relevant clocks enabled to perform the update.
92 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
) {
93 struct exynos_drm_crtc
*exynos_crtc
= to_exynos_crtc(crtc
);
95 atomic_set(&exynos_crtc
->pending_update
, 0);
98 for_each_plane_in_state(state
, plane
, plane_state
, i
) {
99 struct exynos_drm_crtc
*exynos_crtc
=
100 to_exynos_crtc(plane
->crtc
);
105 atomic_inc(&exynos_crtc
->pending_update
);
108 drm_atomic_helper_commit_planes(dev
, state
, false);
110 exynos_atomic_wait_for_commit(state
);
112 drm_atomic_helper_cleanup_planes(dev
, state
);
114 drm_atomic_state_free(state
);
116 spin_lock(&priv
->lock
);
117 priv
->pending
&= ~commit
->crtcs
;
118 spin_unlock(&priv
->lock
);
120 wake_up_all(&priv
->wait
);
125 static void exynos_drm_atomic_work(struct work_struct
*work
)
127 struct exynos_atomic_commit
*commit
= container_of(work
,
128 struct exynos_atomic_commit
, work
);
130 exynos_atomic_commit_complete(commit
);
/* Defined near the bottom of this file; needed by exynos_drm_load(). */
static struct device *exynos_drm_get_dma_device(void);
135 static int exynos_drm_load(struct drm_device
*dev
, unsigned long flags
)
137 struct exynos_drm_private
*private;
138 struct drm_encoder
*encoder
;
139 unsigned int clone_mask
;
142 private = kzalloc(sizeof(struct exynos_drm_private
), GFP_KERNEL
);
146 init_waitqueue_head(&private->wait
);
147 spin_lock_init(&private->lock
);
149 dev_set_drvdata(dev
->dev
, dev
);
150 dev
->dev_private
= (void *)private;
152 /* the first real CRTC device is used for all dma mapping operations */
153 private->dma_dev
= exynos_drm_get_dma_device();
154 if (!private->dma_dev
) {
155 DRM_ERROR("no device found for DMA mapping operations.\n");
157 goto err_free_private
;
159 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
160 dev_name(private->dma_dev
));
163 * create mapping to manage iommu table and set a pointer to iommu
164 * mapping structure to iommu_mapping of private data.
165 * also this iommu_mapping can be used to check if iommu is supported
168 ret
= drm_create_iommu_mapping(dev
);
170 DRM_ERROR("failed to create iommu mapping.\n");
171 goto err_free_private
;
174 drm_mode_config_init(dev
);
176 exynos_drm_mode_config_init(dev
);
178 /* setup possible_clones. */
181 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
)
182 clone_mask
|= (1 << (cnt
++));
184 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
)
185 encoder
->possible_clones
= clone_mask
;
187 platform_set_drvdata(dev
->platformdev
, dev
);
189 /* Try to bind all sub drivers. */
190 ret
= component_bind_all(dev
->dev
, dev
);
192 goto err_mode_config_cleanup
;
194 ret
= drm_vblank_init(dev
, dev
->mode_config
.num_crtc
);
198 /* Probe non kms sub drivers and virtual display driver. */
199 ret
= exynos_drm_device_subdrv_probe(dev
);
201 goto err_cleanup_vblank
;
203 drm_mode_config_reset(dev
);
206 * enable drm irq mode.
207 * - with irq_enabled = true, we can use the vblank feature.
209 * P.S. note that we wouldn't use drm irq handler but
210 * just specific driver own one instead because
211 * drm framework supports only one irq handler.
213 dev
->irq_enabled
= true;
215 /* init kms poll for handling hpd */
216 drm_kms_helper_poll_init(dev
);
218 /* force connectors detection */
219 drm_helper_hpd_irq_event(dev
);
224 drm_vblank_cleanup(dev
);
226 component_unbind_all(dev
->dev
, dev
);
227 err_mode_config_cleanup
:
228 drm_mode_config_cleanup(dev
);
229 drm_release_iommu_mapping(dev
);
236 static int exynos_drm_unload(struct drm_device
*dev
)
238 exynos_drm_device_subdrv_remove(dev
);
240 exynos_drm_fbdev_fini(dev
);
241 drm_kms_helper_poll_fini(dev
);
243 drm_vblank_cleanup(dev
);
244 component_unbind_all(dev
->dev
, dev
);
245 drm_mode_config_cleanup(dev
);
246 drm_release_iommu_mapping(dev
);
248 kfree(dev
->dev_private
);
249 dev
->dev_private
= NULL
;
254 static int commit_is_pending(struct exynos_drm_private
*priv
, u32 crtcs
)
258 spin_lock(&priv
->lock
);
259 pending
= priv
->pending
& crtcs
;
260 spin_unlock(&priv
->lock
);
265 int exynos_atomic_commit(struct drm_device
*dev
, struct drm_atomic_state
*state
,
268 struct exynos_drm_private
*priv
= dev
->dev_private
;
269 struct exynos_atomic_commit
*commit
;
270 struct drm_crtc
*crtc
;
271 struct drm_crtc_state
*crtc_state
;
274 commit
= kzalloc(sizeof(*commit
), GFP_KERNEL
);
278 ret
= drm_atomic_helper_prepare_planes(dev
, state
);
284 /* This is the point of no return */
286 INIT_WORK(&commit
->work
, exynos_drm_atomic_work
);
288 commit
->state
= state
;
290 /* Wait until all affected CRTCs have completed previous commits and
291 * mark them as pending.
293 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
)
294 commit
->crtcs
|= drm_crtc_mask(crtc
);
296 wait_event(priv
->wait
, !commit_is_pending(priv
, commit
->crtcs
));
298 spin_lock(&priv
->lock
);
299 priv
->pending
|= commit
->crtcs
;
300 spin_unlock(&priv
->lock
);
302 drm_atomic_helper_swap_state(state
, true);
305 schedule_work(&commit
->work
);
307 exynos_atomic_commit_complete(commit
);
312 static int exynos_drm_open(struct drm_device
*dev
, struct drm_file
*file
)
314 struct drm_exynos_file_private
*file_priv
;
317 file_priv
= kzalloc(sizeof(*file_priv
), GFP_KERNEL
);
321 file
->driver_priv
= file_priv
;
323 ret
= exynos_drm_subdrv_open(dev
, file
);
325 goto err_file_priv_free
;
331 file
->driver_priv
= NULL
;
335 static void exynos_drm_preclose(struct drm_device
*dev
,
336 struct drm_file
*file
)
338 struct drm_crtc
*crtc
;
340 exynos_drm_subdrv_close(dev
, file
);
342 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
)
343 exynos_drm_crtc_cancel_page_flip(crtc
, file
);
346 static void exynos_drm_postclose(struct drm_device
*dev
, struct drm_file
*file
)
348 kfree(file
->driver_priv
);
349 file
->driver_priv
= NULL
;
/* drm_driver.lastclose callback: restore the fbdev console mode. */
static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}
357 static const struct vm_operations_struct exynos_drm_gem_vm_ops
= {
358 .fault
= exynos_drm_gem_fault
,
359 .open
= drm_gem_vm_open
,
360 .close
= drm_gem_vm_close
,
363 static const struct drm_ioctl_desc exynos_ioctls
[] = {
364 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE
, exynos_drm_gem_create_ioctl
,
365 DRM_AUTH
| DRM_RENDER_ALLOW
),
366 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP
, exynos_drm_gem_map_ioctl
,
367 DRM_AUTH
| DRM_RENDER_ALLOW
),
368 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET
, exynos_drm_gem_get_ioctl
,
370 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION
, vidi_connection_ioctl
,
372 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER
, exynos_g2d_get_ver_ioctl
,
373 DRM_AUTH
| DRM_RENDER_ALLOW
),
374 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST
, exynos_g2d_set_cmdlist_ioctl
,
375 DRM_AUTH
| DRM_RENDER_ALLOW
),
376 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC
, exynos_g2d_exec_ioctl
,
377 DRM_AUTH
| DRM_RENDER_ALLOW
),
378 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY
, exynos_drm_ipp_get_property
,
379 DRM_AUTH
| DRM_RENDER_ALLOW
),
380 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY
, exynos_drm_ipp_set_property
,
381 DRM_AUTH
| DRM_RENDER_ALLOW
),
382 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF
, exynos_drm_ipp_queue_buf
,
383 DRM_AUTH
| DRM_RENDER_ALLOW
),
384 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL
, exynos_drm_ipp_cmd_ctrl
,
385 DRM_AUTH
| DRM_RENDER_ALLOW
),
388 static const struct file_operations exynos_drm_driver_fops
= {
389 .owner
= THIS_MODULE
,
391 .mmap
= exynos_drm_gem_mmap
,
394 .unlocked_ioctl
= drm_ioctl
,
396 .compat_ioctl
= drm_compat_ioctl
,
398 .release
= drm_release
,
401 static struct drm_driver exynos_drm_driver
= {
402 .driver_features
= DRIVER_MODESET
| DRIVER_GEM
| DRIVER_PRIME
403 | DRIVER_ATOMIC
| DRIVER_RENDER
,
404 .load
= exynos_drm_load
,
405 .unload
= exynos_drm_unload
,
406 .open
= exynos_drm_open
,
407 .preclose
= exynos_drm_preclose
,
408 .lastclose
= exynos_drm_lastclose
,
409 .postclose
= exynos_drm_postclose
,
410 .get_vblank_counter
= drm_vblank_no_hw_counter
,
411 .enable_vblank
= exynos_drm_crtc_enable_vblank
,
412 .disable_vblank
= exynos_drm_crtc_disable_vblank
,
413 .gem_free_object_unlocked
= exynos_drm_gem_free_object
,
414 .gem_vm_ops
= &exynos_drm_gem_vm_ops
,
415 .dumb_create
= exynos_drm_gem_dumb_create
,
416 .dumb_map_offset
= exynos_drm_gem_dumb_map_offset
,
417 .dumb_destroy
= drm_gem_dumb_destroy
,
418 .prime_handle_to_fd
= drm_gem_prime_handle_to_fd
,
419 .prime_fd_to_handle
= drm_gem_prime_fd_to_handle
,
420 .gem_prime_export
= drm_gem_prime_export
,
421 .gem_prime_import
= drm_gem_prime_import
,
422 .gem_prime_get_sg_table
= exynos_drm_gem_prime_get_sg_table
,
423 .gem_prime_import_sg_table
= exynos_drm_gem_prime_import_sg_table
,
424 .gem_prime_vmap
= exynos_drm_gem_prime_vmap
,
425 .gem_prime_vunmap
= exynos_drm_gem_prime_vunmap
,
426 .gem_prime_mmap
= exynos_drm_gem_prime_mmap
,
427 .ioctls
= exynos_ioctls
,
428 .num_ioctls
= ARRAY_SIZE(exynos_ioctls
),
429 .fops
= &exynos_drm_driver_fops
,
433 .major
= DRIVER_MAJOR
,
434 .minor
= DRIVER_MINOR
,
437 #ifdef CONFIG_PM_SLEEP
438 static int exynos_drm_suspend(struct device
*dev
)
440 struct drm_device
*drm_dev
= dev_get_drvdata(dev
);
441 struct drm_connector
*connector
;
443 if (pm_runtime_suspended(dev
) || !drm_dev
)
446 drm_modeset_lock_all(drm_dev
);
447 drm_for_each_connector(connector
, drm_dev
) {
448 int old_dpms
= connector
->dpms
;
450 if (connector
->funcs
->dpms
)
451 connector
->funcs
->dpms(connector
, DRM_MODE_DPMS_OFF
);
453 /* Set the old mode back to the connector for resume */
454 connector
->dpms
= old_dpms
;
456 drm_modeset_unlock_all(drm_dev
);
461 static int exynos_drm_resume(struct device
*dev
)
463 struct drm_device
*drm_dev
= dev_get_drvdata(dev
);
464 struct drm_connector
*connector
;
466 if (pm_runtime_suspended(dev
) || !drm_dev
)
469 drm_modeset_lock_all(drm_dev
);
470 drm_for_each_connector(connector
, drm_dev
) {
471 if (connector
->funcs
->dpms
) {
472 int dpms
= connector
->dpms
;
474 connector
->dpms
= DRM_MODE_DPMS_OFF
;
475 connector
->funcs
->dpms(connector
, dpms
);
478 drm_modeset_unlock_all(drm_dev
);
484 static const struct dev_pm_ops exynos_drm_pm_ops
= {
485 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend
, exynos_drm_resume
)
488 /* forward declaration */
489 static struct platform_driver exynos_drm_platform_driver
;
/*
 * One entry per sub-driver in the exynos_drm_drivers[] table below.
 * NOTE(review): the flags member was lost in extraction but is required
 * by the info->flags tests throughout this file.
 */
struct exynos_drm_driver_info {
	struct platform_driver *driver;	/* NULL when config-disabled (DRV_PTR) */
	unsigned int flags;		/* DRM_* bits below */
};

#define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
#define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */

/* Yields a driver pointer only when its Kconfig option is enabled. */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
503 * Connector drivers should not be placed before associated crtc drivers,
504 * because connector requires pipe number of its crtc during initialization.
506 static struct exynos_drm_driver_info exynos_drm_drivers
[] = {
508 DRV_PTR(fimd_driver
, CONFIG_DRM_EXYNOS_FIMD
),
509 DRM_COMPONENT_DRIVER
| DRM_DMA_DEVICE
511 DRV_PTR(exynos5433_decon_driver
, CONFIG_DRM_EXYNOS5433_DECON
),
512 DRM_COMPONENT_DRIVER
| DRM_DMA_DEVICE
514 DRV_PTR(decon_driver
, CONFIG_DRM_EXYNOS7_DECON
),
515 DRM_COMPONENT_DRIVER
| DRM_DMA_DEVICE
517 DRV_PTR(mixer_driver
, CONFIG_DRM_EXYNOS_MIXER
),
518 DRM_COMPONENT_DRIVER
| DRM_DMA_DEVICE
520 DRV_PTR(mic_driver
, CONFIG_DRM_EXYNOS_MIC
),
523 DRV_PTR(dp_driver
, CONFIG_DRM_EXYNOS_DP
),
526 DRV_PTR(dsi_driver
, CONFIG_DRM_EXYNOS_DSI
),
529 DRV_PTR(hdmi_driver
, CONFIG_DRM_EXYNOS_HDMI
),
532 DRV_PTR(vidi_driver
, CONFIG_DRM_EXYNOS_VIDI
),
533 DRM_COMPONENT_DRIVER
| DRM_VIRTUAL_DEVICE
535 DRV_PTR(g2d_driver
, CONFIG_DRM_EXYNOS_G2D
),
537 DRV_PTR(fimc_driver
, CONFIG_DRM_EXYNOS_FIMC
),
539 DRV_PTR(rotator_driver
, CONFIG_DRM_EXYNOS_ROTATOR
),
541 DRV_PTR(gsc_driver
, CONFIG_DRM_EXYNOS_GSC
),
543 DRV_PTR(ipp_driver
, CONFIG_DRM_EXYNOS_IPP
),
546 &exynos_drm_platform_driver
,
/* component_match comparison: match by exact device pointer. */
static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}
556 static struct component_match
*exynos_drm_match_add(struct device
*dev
)
558 struct component_match
*match
= NULL
;
561 for (i
= 0; i
< ARRAY_SIZE(exynos_drm_drivers
); ++i
) {
562 struct exynos_drm_driver_info
*info
= &exynos_drm_drivers
[i
];
563 struct device
*p
= NULL
, *d
;
565 if (!info
->driver
|| !(info
->flags
& DRM_COMPONENT_DRIVER
))
568 while ((d
= bus_find_device(&platform_bus_type
, p
,
569 &info
->driver
->driver
,
570 (void *)platform_bus_type
.match
))) {
572 component_match_add(dev
, &match
, compare_dev
, d
);
578 return match
?: ERR_PTR(-ENODEV
);
581 static int exynos_drm_bind(struct device
*dev
)
583 return drm_platform_init(&exynos_drm_driver
, to_platform_device(dev
));
/* Component master unbind: unregister and release the DRM device. */
static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}
591 static const struct component_master_ops exynos_drm_ops
= {
592 .bind
= exynos_drm_bind
,
593 .unbind
= exynos_drm_unbind
,
596 static int exynos_drm_platform_probe(struct platform_device
*pdev
)
598 struct component_match
*match
;
600 pdev
->dev
.coherent_dma_mask
= DMA_BIT_MASK(32);
601 exynos_drm_driver
.num_ioctls
= ARRAY_SIZE(exynos_ioctls
);
603 match
= exynos_drm_match_add(&pdev
->dev
);
605 return PTR_ERR(match
);
607 return component_master_add_with_match(&pdev
->dev
, &exynos_drm_ops
,
611 static int exynos_drm_platform_remove(struct platform_device
*pdev
)
613 component_master_del(&pdev
->dev
, &exynos_drm_ops
);
617 static struct platform_driver exynos_drm_platform_driver
= {
618 .probe
= exynos_drm_platform_probe
,
619 .remove
= exynos_drm_platform_remove
,
621 .name
= "exynos-drm",
622 .pm
= &exynos_drm_pm_ops
,
626 static struct device
*exynos_drm_get_dma_device(void)
630 for (i
= 0; i
< ARRAY_SIZE(exynos_drm_drivers
); ++i
) {
631 struct exynos_drm_driver_info
*info
= &exynos_drm_drivers
[i
];
634 if (!info
->driver
|| !(info
->flags
& DRM_DMA_DEVICE
))
637 while ((dev
= bus_find_device(&platform_bus_type
, NULL
,
638 &info
->driver
->driver
,
639 (void *)platform_bus_type
.match
))) {
647 static void exynos_drm_unregister_devices(void)
651 for (i
= ARRAY_SIZE(exynos_drm_drivers
) - 1; i
>= 0; --i
) {
652 struct exynos_drm_driver_info
*info
= &exynos_drm_drivers
[i
];
655 if (!info
->driver
|| !(info
->flags
& DRM_VIRTUAL_DEVICE
))
658 while ((dev
= bus_find_device(&platform_bus_type
, NULL
,
659 &info
->driver
->driver
,
660 (void *)platform_bus_type
.match
))) {
662 platform_device_unregister(to_platform_device(dev
));
667 static int exynos_drm_register_devices(void)
669 struct platform_device
*pdev
;
672 for (i
= 0; i
< ARRAY_SIZE(exynos_drm_drivers
); ++i
) {
673 struct exynos_drm_driver_info
*info
= &exynos_drm_drivers
[i
];
675 if (!info
->driver
|| !(info
->flags
& DRM_VIRTUAL_DEVICE
))
678 pdev
= platform_device_register_simple(
679 info
->driver
->driver
.name
, -1, NULL
, 0);
686 exynos_drm_unregister_devices();
687 return PTR_ERR(pdev
);
690 static void exynos_drm_unregister_drivers(void)
694 for (i
= ARRAY_SIZE(exynos_drm_drivers
) - 1; i
>= 0; --i
) {
695 struct exynos_drm_driver_info
*info
= &exynos_drm_drivers
[i
];
700 platform_driver_unregister(info
->driver
);
704 static int exynos_drm_register_drivers(void)
708 for (i
= 0; i
< ARRAY_SIZE(exynos_drm_drivers
); ++i
) {
709 struct exynos_drm_driver_info
*info
= &exynos_drm_drivers
[i
];
714 ret
= platform_driver_register(info
->driver
);
720 exynos_drm_unregister_drivers();
/* Module init: create the virtual devices, then register all drivers. */
static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}
/* Module exit: undo exynos_drm_init() in reverse order. */
static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}
750 module_init(exynos_drm_init
);
751 module_exit(exynos_drm_exit
);
753 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
754 MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
755 MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
756 MODULE_DESCRIPTION("Samsung SoC DRM Driver");
757 MODULE_LICENSE("GPL");