/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

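/* Per-device fbdev state: the shared drm_fb_helper plus the GEM object backing the console. */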
struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;
	struct exynos_drm_gem_obj	*exynos_gem_obj;
};

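/*
 * fb_mmap handler: map the GEM buffer backing the fbdev framebuffer into
 * the caller's address space via dma_mmap_attrs(), rejecting requests
 * larger than the allocated buffer.
 */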
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
			buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

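/* fbdev ops: software drawing via cfb_*, mode handling delegated to drm_fb_helper. */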
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap	= exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

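/*
 * Fill the fb_info for a freshly created framebuffer: publish the fix/var
 * screen info, make sure the buffer has a kernel mapping (vmap for
 * IOMMU-backed buffers, phys_to_virt otherwise) and point screen_base and
 * smem_start at it.
 */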
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_LOG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map the buffer into kernel virtual address space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			phys_addr_t dma_addr = buffer->dma_addr;

			if (dma_addr)
				buffer->kvaddr = phys_to_virt(dma_addr);
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* the buffer count of the framebuffer is always 1 at boot time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}

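/*
 * .fb_probe callback: allocate the GEM buffer (contiguous first, with a
 * non-contiguous fallback when an IOMMU is available), wrap it in a DRM
 * framebuffer and register the fbdev emulation on top of it.
 */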
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct platform_device *pdev = dev->platformdev;
	unsigned long size;
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

	size = mode_cmd.pitches[0] * mode_cmd.height;

	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and if IOMMU is
	 * supported then try to get buffer from non physically contiguous
	 * memory area.
	 */
	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
							size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		goto err_release_framebuffer;
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
	if (IS_ERR(helper->fb)) {
		DRM_ERROR("failed to create drm framebuffer.\n");
		ret = PTR_ERR(helper->fb);
		goto err_destroy_gem;
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
		goto err_destroy_framebuffer;
	}

	ret = exynos_drm_fbdev_update(helper, helper->fb);
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);

	/*
	 * If this fails, the resources allocated above are released by
	 * drm_mode_config_cleanup(), since drm_load() has been called
	 * before any specific driver such as fimd or hdmi.
	 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe = exynos_drm_fbdev_create,
};

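/*
 * Create the fbdev emulation for the device: allocate the helper, bind it
 * to all CRTCs and connectors, and let drm_fb_helper pick an initial
 * configuration at PREFERRED_BPP.
 */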
int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev) {
		DRM_ERROR("failed to allocate drm fbdev.\n");
		return -ENOMEM;
	}

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

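/*
 * Tear down the fbdev emulation: drop the kernel mapping, remove the DRM
 * framebuffer, unregister and free the fb_info, then finalize the helper.
 */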
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct drm_framebuffer *fb;

	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
		vunmap(exynos_gem_obj->buffer->kvaddr);

	/* release drm framebuffer and real buffer */
	fb = fb_helper->fb;
	if (fb && fb->funcs) {
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_remove(fb);
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

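/* Driver-facing cleanup entry point, used on the driver unload path. */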
void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

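/* Restore the fbdev mode configuration, e.g. when the last DRM master closes. */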
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
	drm_modeset_unlock_all(dev);
}