drm: sti: use late_register and early_unregister callbacks
drivers/gpu/drm/sti/sti_drv.c
/*
 * Copyright (C) STMicroelectronics SA 2014
 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <drm/drmP.h>

#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>

#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_plane.h"

#define DRIVER_NAME	"sti"
#define DRIVER_DESC	"STMicroelectronics SoC DRM"
#define DRIVER_DATE	"20140601"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

#define STI_MAX_FB_HEIGHT	4096
#define STI_MAX_FB_WIDTH	4096

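/*
 * The "fps_show" debugfs attribute exposes one bit per plane: bit i of the
 * u64 value enables (set) or disables (clear) FPS reporting for the i-th
 * plane in mode_config.plane_list.
 */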
static int sti_drm_fps_get(void *data, u64 *val)
{
	struct drm_device *drm_dev = data;
	struct drm_plane *p;
	unsigned int i = 0;

	*val = 0;
	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
		struct sti_plane *plane = to_sti_plane(p);

		*val |= plane->fps_info.output << i;
		i++;
	}

	return 0;
}

static int sti_drm_fps_set(void *data, u64 val)
{
	struct drm_device *drm_dev = data;
	struct drm_plane *p;
	unsigned int i = 0;

	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
		struct sti_plane *plane = to_sti_plane(p);

		plane->fps_info.output = (val >> i) & 1;
		i++;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sti_drm_fps_fops,
			sti_drm_fps_get, sti_drm_fps_set, "%llu\n");

static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_plane *p;

	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
		struct sti_plane *plane = to_sti_plane(p);

		seq_printf(s, "%s%s\n",
			   plane->fps_info.fps_str,
			   plane->fps_info.fips_str);
	}

	return 0;
}

static struct drm_info_list sti_drm_dbg_list[] = {
	{"fps_get", sti_drm_fps_dbg_show, 0},
};

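/*
 * Helper that mirrors drm_debugfs_create_files() for a plain struct
 * file_operations entry: it creates the debugfs file and links a
 * drm_info_node into the minor's debugfs_list so the file can later be
 * removed through drm_debugfs_remove_files() at cleanup time.
 */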
static int sti_drm_debugfs_create(struct dentry *root,
				  struct drm_minor *minor,
				  const char *name,
				  const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct drm_info_node *node;
	struct dentry *ent;

	ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *)fops;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int sti_drm_dbg_init(struct drm_minor *minor)
{
	int ret;

	ret = drm_debugfs_create_files(sti_drm_dbg_list,
				       ARRAY_SIZE(sti_drm_dbg_list),
				       minor->debugfs_root, minor);
	if (ret)
		goto err;

	ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show",
				     &sti_drm_fps_fops);
	if (ret)
		goto err;

	DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
	return 0;
err:
	DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
	return ret;
}

void sti_drm_dbg_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(sti_drm_dbg_list,
				 ARRAY_SIZE(sti_drm_dbg_list), minor);

	drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
				 1, minor);
}

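/*
 * Nonblocking atomic commits are completed from a single workqueue item:
 * sti_atomic_schedule() stashes the state in private->commit.state and
 * schedules private->commit.work, whose handler (sti_atomic_work) calls
 * sti_atomic_complete() to program the hardware.
 */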
static void sti_atomic_schedule(struct sti_private *private,
				struct drm_atomic_state *state)
{
	private->commit.state = state;
	schedule_work(&private->commit.work);
}

static void sti_atomic_complete(struct sti_private *private,
				struct drm_atomic_state *state)
{
	struct drm_device *drm = private->drm_dev;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state, false);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}

static void sti_atomic_work(struct work_struct *work)
{
	struct sti_private *private = container_of(work,
			struct sti_private, commit.work);

	sti_atomic_complete(private, private->commit.state);
}

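/*
 * Driver-specific .atomic_commit implementation: planes are prepared, any
 * outstanding nonblocking commit is flushed under commit.lock, the new state
 * is swapped in, and the commit then either runs to completion here or is
 * handed off to the worker when nonblock is set.
 */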
static int sti_atomic_commit(struct drm_device *drm,
			     struct drm_atomic_state *state, bool nonblock)
{
	struct sti_private *private = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&private->commit.lock);
	flush_work(&private->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	if (nonblock)
		sti_atomic_schedule(private, state);
	else
		sti_atomic_complete(private, state);

	mutex_unlock(&private->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs sti_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = sti_atomic_commit,
};

static void sti_mode_config_init(struct drm_device *dev)
{
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * Set max width and height as default values. These values are used
	 * to check the framebuffer size limitation in drm_mode_addfb().
	 */
	dev->mode_config.max_width = STI_MAX_FB_WIDTH;
	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;

	dev->mode_config.funcs = &sti_mode_config_funcs;
}

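/*
 * .load callback: allocates the driver private data, initialises the commit
 * worker and the mode configuration, binds all componentised sub-devices and
 * finally sets up the CMA fbdev emulation.
 */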
static int sti_load(struct drm_device *dev, unsigned long flags)
{
	struct sti_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private) {
		DRM_ERROR("Failed to allocate private\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)private;
	private->drm_dev = dev;

	mutex_init(&private->commit.lock);
	INIT_WORK(&private->commit.work, sti_atomic_work);

	drm_mode_config_init(dev);
	drm_kms_helper_poll_init(dev);

	sti_mode_config_init(dev);

	ret = component_bind_all(dev->dev, dev);
	if (ret) {
		drm_kms_helper_poll_fini(dev);
		drm_mode_config_cleanup(dev);
		kfree(private);
		return ret;
	}

	drm_mode_config_reset(dev);

	drm_fbdev_cma_init(dev, 32,
			   dev->mode_config.num_crtc,
			   dev->mode_config.num_connector);

	return 0;
}

static const struct file_operations sti_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = drm_gem_cma_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.release = drm_release,
};

static struct drm_driver sti_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
	    DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
	.load = sti_load,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.fops = &sti_driver_fops,

	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank = sti_crtc_enable_vblank,
	.disable_vblank = sti_crtc_disable_vblank,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,

	.debugfs_init = sti_drm_dbg_init,
	.debugfs_cleanup = sti_drm_dbg_cleanup,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

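/*
 * Component master callbacks: once every sub-device listed in the match
 * array has been probed, sti_bind() registers the DRM device through
 * drm_platform_init(), and sti_unbind() tears it down again with
 * drm_put_dev().
 */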
static int sti_bind(struct device *dev)
{
	return drm_platform_init(&sti_driver, to_platform_device(dev));
}

static void sti_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops sti_ops = {
	.bind = sti_bind,
	.unbind = sti_unbind,
};

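/*
 * The "st,sti-display-subsystem" node only acts as a container: probe
 * populates its child platform devices and builds a component match entry
 * for each available child, so the DRM device is created only once all
 * sub-drivers have bound.
 */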
static int sti_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *child_np;
	struct component_match *match = NULL;

	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

	of_platform_populate(node, NULL, NULL, dev);

	child_np = of_get_next_available_child(node, NULL);

	while (child_np) {
		component_match_add(dev, &match, compare_of, child_np);
		of_node_put(child_np);
		child_np = of_get_next_available_child(node, child_np);
	}

	return component_master_add_with_match(dev, &sti_ops, match);
}

static int sti_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &sti_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id sti_dt_ids[] = {
	{ .compatible = "st,sti-display-subsystem", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, sti_dt_ids);

static struct platform_driver sti_platform_driver = {
	.probe = sti_platform_probe,
	.remove = sti_platform_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = sti_dt_ids,
	},
};

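/*
 * A single module registers every STI sub-driver (TVout, VTAC, HQVDP, HDMI,
 * HDA, DVO, VTG, compositor) together with the master platform driver above,
 * so they are all loaded and unloaded as one unit.
 */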
static struct platform_driver * const drivers[] = {
	&sti_tvout_driver,
	&sti_vtac_driver,
	&sti_hqvdp_driver,
	&sti_hdmi_driver,
	&sti_hda_driver,
	&sti_dvo_driver,
	&sti_vtg_driver,
	&sti_compositor_driver,
	&sti_platform_driver,
};

static int sti_drm_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(sti_drm_init);

static void sti_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(sti_drm_exit);

MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");