drm: Lobotomize set_busid nonsense for !pci drivers
drivers/gpu/drm/exynos/exynos_drm_drv.c
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

struct exynos_atomic_commit {
	struct work_struct	work;
	struct drm_device	*dev;
	struct drm_atomic_state *state;
	u32			crtcs;
};

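/*
 * Wait until every enabled CRTC touched by this commit has finished its
 * pending plane updates. A vblank reference is held across the wait so the
 * interrupt that signals completion stays enabled.
 */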
static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		if (!crtc->state->enable)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			continue;

		exynos_drm_crtc_wait_pending_update(exynos_crtc);
		drm_crtc_vblank_put(crtc);
	}
}

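/*
 * Apply an atomic state to the hardware: modesets first, then plane updates,
 * then wait for the updates to land before cleaning up, clearing the CRTC
 * pending bits and waking any commit waiting in exynos_atomic_commit().
 */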
static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct exynos_drm_private *priv = dev->dev_private;
	struct drm_atomic_state *state = commit->state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/*
	 * Exynos can't update planes with CRTCs and encoders disabled,
	 * its update routines, especially for FIMD, require the clocks
	 * to be enabled. So it is necessary to handle the modeset operations
	 * *before* the commit_planes() step, this way it will always
	 * have the relevant clocks enabled to perform the update.
	 */

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		atomic_set(&exynos_crtc->pending_update, 0);
	}

	for_each_plane_in_state(state, plane, plane_state, i) {
		struct exynos_drm_crtc *exynos_crtc =
				to_exynos_crtc(plane->crtc);

		if (!plane->crtc)
			continue;

		atomic_inc(&exynos_crtc->pending_update);
	}

	drm_atomic_helper_commit_planes(dev, state, false);

	exynos_atomic_wait_for_commit(state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	spin_lock(&priv->lock);
	priv->pending &= ~commit->crtcs;
	spin_unlock(&priv->lock);

	wake_up_all(&priv->wait);

	kfree(commit);
}

static void exynos_drm_atomic_work(struct work_struct *work)
{
	struct exynos_atomic_commit *commit = container_of(work,
				struct exynos_atomic_commit, work);

	exynos_atomic_commit_complete(commit);
}

static struct device *exynos_drm_get_dma_device(void);

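/*
 * .load callback of struct drm_driver: allocate the driver private data, pick
 * the device used for DMA/IOMMU mappings, initialise mode config and vblank
 * support, and bind all component sub-drivers.
 */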
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct exynos_drm_private *private;
	struct drm_encoder *encoder;
	unsigned int clone_mask;
	int cnt, ret;

	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;

	init_waitqueue_head(&private->wait);
	spin_lock_init(&private->lock);

	dev_set_drvdata(dev->dev, dev);
	dev->dev_private = (void *)private;

	/* the first real CRTC device is used for all dma mapping operations */
	private->dma_dev = exynos_drm_get_dma_device();
	if (!private->dma_dev) {
		DRM_ERROR("no device found for DMA mapping operations.\n");
		ret = -ENODEV;
		goto err_free_private;
	}
	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
		 dev_name(private->dma_dev));

	/*
	 * Create a mapping to manage the IOMMU table and store a pointer to
	 * it in the iommu_mapping field of the private data. This mapping is
	 * also used to check whether an IOMMU is supported at all.
	 */
	ret = drm_create_iommu_mapping(dev);
	if (ret < 0) {
		DRM_ERROR("failed to create iommu mapping.\n");
		goto err_free_private;
	}

	drm_mode_config_init(dev);

	exynos_drm_mode_config_init(dev);

	/* setup possible_clones. */
	cnt = 0;
	clone_mask = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		clone_mask |= (1 << (cnt++));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;

	platform_set_drvdata(dev->platformdev, dev);

	/* Try to bind all sub drivers. */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_mode_config_cleanup;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_unbind_all;

	/* Probe non-KMS sub drivers and the virtual display driver. */
	ret = exynos_drm_device_subdrv_probe(dev);
	if (ret)
		goto err_cleanup_vblank;

	drm_mode_config_reset(dev);

	/*
	 * Enable drm irq mode.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * Note that we do not use the drm irq handler but each driver's own
	 * interrupt handler instead, because the drm framework supports only
	 * one irq handler.
	 */
	dev->irq_enabled = true;

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_cleanup_vblank:
	drm_vblank_cleanup(dev);
err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);
err_free_private:
	kfree(private);

	return ret;
}

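/* .unload callback: tear down everything set up in exynos_drm_load(). */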
static int exynos_drm_unload(struct drm_device *dev)
{
	exynos_drm_device_subdrv_remove(dev);

	exynos_drm_fbdev_fini(dev);
	drm_kms_helper_poll_fini(dev);

	drm_vblank_cleanup(dev);
	component_unbind_all(dev->dev, dev);
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);

	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}

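/*
 * Check, under priv->lock, whether any CRTC in the given mask still has a
 * commit in flight.
 */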
static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
	bool pending;

	spin_lock(&priv->lock);
	pending = priv->pending & crtcs;
	spin_unlock(&priv->lock);

	return pending;
}

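/*
 * Driver-specific atomic commit: serialise commits per CRTC by waiting until
 * none of the affected CRTCs has a commit pending, then either apply the new
 * state directly (blocking) or hand it to a worker (nonblocking).
 */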
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
			 bool nonblock)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct exynos_atomic_commit *commit;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return -ENOMEM;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(commit);
		return ret;
	}

	/* This is the point of no return */

	INIT_WORK(&commit->work, exynos_drm_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/* Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		commit->crtcs |= drm_crtc_mask(crtc);

	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));

	spin_lock(&priv->lock);
	priv->pending |= commit->crtcs;
	spin_unlock(&priv->lock);

	drm_atomic_helper_swap_state(state, true);

	if (nonblock)
		schedule_work(&commit->work);
	else
		exynos_atomic_commit_complete(commit);

	return 0;
}

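/* Allocate per-file private data and let the sub-drivers set up their state. */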
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv;
	int ret;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	ret = exynos_drm_subdrv_open(dev, file);
	if (ret)
		goto err_file_priv_free;

	return ret;

err_file_priv_free:
	kfree(file_priv);
	file->driver_priv = NULL;
	return ret;
}

static void exynos_drm_preclose(struct drm_device *dev,
				struct drm_file *file)
{
	struct drm_crtc *crtc;

	exynos_drm_subdrv_close(dev, file);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		exynos_drm_crtc_cancel_page_flip(crtc, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
			DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= exynos_drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= drm_compat_ioctl,
#endif
	.release	= drm_release,
};

static struct drm_driver exynos_drm_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
				  | DRIVER_ATOMIC | DRIVER_RENDER,
	.load			= exynos_drm_load,
	.unload			= exynos_drm_unload,
	.open			= exynos_drm_open,
	.preclose		= exynos_drm_preclose,
	.lastclose		= exynos_drm_lastclose,
	.postclose		= exynos_drm_postclose,
	.get_vblank_counter	= drm_vblank_no_hw_counter,
	.enable_vblank		= exynos_drm_crtc_enable_vblank,
	.disable_vblank		= exynos_drm_crtc_disable_vblank,
	.gem_free_object_unlocked = exynos_drm_gem_free_object,
	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
	.dumb_create		= exynos_drm_gem_dumb_create,
	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
	.gem_prime_mmap		= exynos_drm_gem_prime_mmap,
	.ioctls			= exynos_ioctls,
	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
	.fops			= &exynos_drm_driver_fops,
	.name			= DRIVER_NAME,
	.desc			= DRIVER_DESC,
	.date			= DRIVER_DATE,
	.major			= DRIVER_MAJOR,
	.minor			= DRIVER_MINOR,
};

#ifdef CONFIG_PM_SLEEP
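/*
 * System sleep: turn every connector off through its DPMS hook while keeping
 * the recorded DPMS state, so exynos_drm_resume() can restore it.
 */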
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		int old_dpms = connector->dpms;

		if (connector->funcs->dpms)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

		/* Set the old mode back to the connector for resume */
		connector->dpms = old_dpms;
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}

static int exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		if (connector->funcs->dpms) {
			int dpms = connector->dpms;

			connector->dpms = DRM_MODE_DPMS_OFF;
			connector->funcs->dpms(connector, dpms);
		}
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}
#endif

static const struct dev_pm_ops exynos_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
};

/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;

struct exynos_drm_driver_info {
	struct platform_driver *driver;
	unsigned int flags;
};

#define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
#define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */

#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)

/*
 * Connector drivers should not be placed before associated crtc drivers,
 * because connector requires pipe number of its crtc during initialization.
 */
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
	{
		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
		DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
	}, {
		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
	}, {
		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
	}, {
		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
	}, {
		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
	}, {
		DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
		DRM_VIRTUAL_DEVICE
	}, {
		&exynos_drm_platform_driver,
		DRM_VIRTUAL_DEVICE
	}
};

static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}

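/*
 * Build the component match list: every platform device matching one of the
 * drivers flagged DRM_COMPONENT_DRIVER becomes a component of the master.
 */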
static struct component_match *exynos_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *p = NULL, *d;

		if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
			continue;

		while ((d = bus_find_device(&platform_bus_type, p,
					    &info->driver->driver,
					    (void *)platform_bus_type.match))) {
			put_device(p);
			component_match_add(dev, &match, compare_dev, d);
			p = d;
		}
		put_device(p);
	}

	return match ?: ERR_PTR(-ENODEV);
}

static int exynos_drm_bind(struct device *dev)
{
	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
}

static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops exynos_drm_ops = {
	.bind		= exynos_drm_bind,
	.unbind		= exynos_drm_unbind,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	struct component_match *match;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);

	match = exynos_drm_match_add(&pdev->dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
					       match);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &exynos_drm_ops);
	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe	= exynos_drm_platform_probe,
	.remove	= exynos_drm_platform_remove,
	.driver	= {
		.name	= "exynos-drm",
		.pm	= &exynos_drm_pm_ops,
	},
};

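/*
 * Return the first platform device matching a driver flagged DRM_DMA_DEVICE;
 * this device is then used for all DMA mapping operations (see
 * exynos_drm_load()).
 */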
static struct device *exynos_drm_get_dma_device(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			return dev;
		}
	}
	return NULL;
}

static void exynos_drm_unregister_devices(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			platform_device_unregister(to_platform_device(dev));
		}
	}
}

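/*
 * Create a platform device for every driver flagged DRM_VIRTUAL_DEVICE
 * (vidi, ipp and the exynos-drm master driver itself); on failure undo any
 * devices registered so far.
 */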
static int exynos_drm_register_devices(void)
{
	struct platform_device *pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		pdev = platform_device_register_simple(
					info->driver->driver.name, -1, NULL, 0);
		if (IS_ERR(pdev))
			goto fail;
	}

	return 0;
fail:
	exynos_drm_unregister_devices();
	return PTR_ERR(pdev);
}

static void exynos_drm_unregister_drivers(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		platform_driver_unregister(info->driver);
	}
}

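/* Register every platform driver in exynos_drm_drivers[], unwinding on error. */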
static int exynos_drm_register_drivers(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		ret = platform_driver_register(info->driver);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	exynos_drm_unregister_drivers();
	return ret;
}

static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}

static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");