Commit | Line | Data |
---|---|---|
5320918b DA |
1 | /* |
2 | * Copyright (C) 2012 Red Hat | |
3 | * | |
4 | * based in parts on udlfb.c: | |
5 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | |
6 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | |
7 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | |
8 | * | |
9 | * This file is subject to the terms and conditions of the GNU General Public | |
10 | * License v2. See the file COPYING in the main directory of this archive for | |
11 | * more details. | |
12 | */ | |
13 | #include <linux/module.h> | |
14 | #include <linux/slab.h> | |
15 | #include <linux/fb.h> | |
32ecd242 | 16 | #include <linux/dma-buf.h> |
5320918b | 17 | |
760285e7 DH |
18 | #include <drm/drmP.h> |
19 | #include <drm/drm_crtc.h> | |
20 | #include <drm/drm_crtc_helper.h> | |
5320918b DA |
21 | #include "udl_drv.h" |
22 | ||
760285e7 | 23 | #include <drm/drm_fb_helper.h> |
5320918b DA |
24 | |
#define DL_DEFIO_WRITE_DELAY    5 /* fb_deferred_io.delay in jiffies */

static int fb_defio = 1;  /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;   /* bpp handed to the fb helper's initial config */

/* Both knobs are runtime-writable module parameters (root only). */
module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
32 | ||
/*
 * Per-device fbdev emulation state: the DRM fb helper, the UDL
 * framebuffer it scans out, and a count of userspace opens.
 */
struct udl_fbdev {
	struct drm_fb_helper helper;
	struct udl_framebuffer ufb;
	struct list_head fbdev_list;
	int fb_count;		/* number of open /dev/fbN file handles */
};
39 | ||
/* Align helpers; arguments are fully parenthesized so callers may pass
 * arbitrary expressions without precedence surprises (the original
 * expanded "x-(a-1)" with bare x/a). */
#define DL_ALIGN_UP(x, a) ALIGN((x), (a))
#define DL_ALIGN_DOWN(x, a) ALIGN((x)-((a)-1), (a))

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
60 | ||
#if 0
/* Compiled-out colour-conversion helpers, kept for reference only. */
static uint8_t rgb8(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return DLO_RGB8(red, grn, blu);
}

static uint16_t rgb16(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif
80 | ||
/*
 * Deferred-I/O flush: push every CPU-written framebuffer page to the
 * device over USB.
 *
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 * Touching ANY framebuffer memory that triggers a page fault
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex.
 */
static void udlfb_dpy_deferred_io(struct fb_info *info,
				  struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct udl_fbdev *ufbdev = info->par;
	struct drm_device *dev = ufbdev->ufb.base.dev;
	struct udl_device *udl = dev->dev_private;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	/* defio may be disabled at runtime through the module param */
	if (!fb_defio)
		return;

	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		/* udl_render_hline may submit full urbs and hand back a
		 * fresh urb/cmd pointer through &urb/&cmd */
		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
				     &urb, (char *) info->fix.smem_start,
				     &cmd, cur->index << PAGE_SHIFT,
				     PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		/* nothing queued: hand the urb straight back to the pool */
		udl_urb_completion(urb);

error:
	/* statistics counters are updated even when rendering bailed */
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(bytes_rendered, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);
}
141 | ||
/*
 * Transfer a damaged rectangle of the framebuffer to the device.
 *
 * The x origin is aligned down and the width aligned up to
 * sizeof(unsigned long) before rendering.  Returns 0 on success —
 * including when the transfer is silently skipped (fb inactive, vmap
 * failure, no free urb) — or -EINVAL when the aligned rectangle falls
 * outside the framebuffer bounds.
 */
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
		      int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct udl_device *udl = dev->dev_private;
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;
	int bpp = (fb->base.bits_per_pixel / 8);

	/* nothing scans this fb out: skip the USB traffic entirely */
	if (!fb->active_16)
		return 0;

	/* lazily vmap the GEM backing; damage can arrive before anyone
	 * has mapped the object */
	if (!fb->obj->vmapping) {
		ret = udl_gem_vmap(fb->obj);
		if (ret == -ENOMEM) {
			DRM_ERROR("failed to vmap fb\n");
			return 0;
		}
		if (!fb->obj->vmapping) {
			DRM_ERROR("failed to vmapping\n");
			return 0;
		}
	}

	start_cycles = get_cycles();

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height))
		return -EINVAL;

	urb = udl_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	/* render one scanline at a time; udl_render_hline may submit
	 * full urbs and return a fresh one through &urb/&cmd */
	for (i = y; i < y + height ; i++) {
		const int line_offset = fb->base.pitches[0] * i;
		const int byte_offset = line_offset + (x * bpp);

		if (udl_render_hline(dev, bpp, &urb,
				     (char *) fb->obj->vmapping,
				     &cmd, byte_offset, width * bpp,
				     &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = udl_submit_urb(dev, urb, len);
		/* NOTE(review): ret from udl_submit_urb is ignored and the
		 * function still returns 0 — presumably best-effort; confirm */
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	/* statistics, updated even when rendering bailed early */
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(width*height*bpp, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);

	return 0;
}
217 | ||
218 | static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) | |
219 | { | |
220 | unsigned long start = vma->vm_start; | |
221 | unsigned long size = vma->vm_end - vma->vm_start; | |
222 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | |
223 | unsigned long page, pos; | |
224 | ||
225 | if (offset + size > info->fix.smem_len) | |
226 | return -EINVAL; | |
227 | ||
228 | pos = (unsigned long)info->fix.smem_start + offset; | |
229 | ||
230 | pr_notice("mmap() framebuffer addr:%lu size:%lu\n", | |
231 | pos, size); | |
232 | ||
233 | while (size > 0) { | |
234 | page = vmalloc_to_pfn((void *)pos); | |
235 | if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) | |
236 | return -EAGAIN; | |
237 | ||
238 | start += PAGE_SIZE; | |
239 | pos += PAGE_SIZE; | |
240 | if (size > PAGE_SIZE) | |
241 | size -= PAGE_SIZE; | |
242 | else | |
243 | size = 0; | |
244 | } | |
245 | ||
246 | vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ | |
247 | return 0; | |
248 | } | |
249 | ||
250 | static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |
251 | { | |
252 | struct udl_fbdev *ufbdev = info->par; | |
253 | ||
254 | sys_fillrect(info, rect); | |
255 | ||
256 | udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width, | |
257 | rect->height); | |
258 | } | |
259 | ||
260 | static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |
261 | { | |
262 | struct udl_fbdev *ufbdev = info->par; | |
263 | ||
264 | sys_copyarea(info, region); | |
265 | ||
266 | udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width, | |
267 | region->height); | |
268 | } | |
269 | ||
270 | static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image) | |
271 | { | |
272 | struct udl_fbdev *ufbdev = info->par; | |
273 | ||
274 | sys_imageblit(info, image); | |
275 | ||
276 | udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width, | |
277 | image->height); | |
278 | } | |
279 | ||
280 | /* | |
281 | * It's common for several clients to have framebuffer open simultaneously. | |
282 | * e.g. both fbcon and X. Makes things interesting. | |
283 | * Assumes caller is holding info->lock (for open and release at least) | |
284 | */ | |
285 | static int udl_fb_open(struct fb_info *info, int user) | |
286 | { | |
287 | struct udl_fbdev *ufbdev = info->par; | |
288 | struct drm_device *dev = ufbdev->ufb.base.dev; | |
289 | struct udl_device *udl = dev->dev_private; | |
290 | ||
291 | /* If the USB device is gone, we don't accept new opens */ | |
292 | if (drm_device_is_unplugged(udl->ddev)) | |
293 | return -ENODEV; | |
294 | ||
295 | ufbdev->fb_count++; | |
296 | ||
297 | if (fb_defio && (info->fbdefio == NULL)) { | |
298 | /* enable defio at last moment if not disabled by client */ | |
299 | ||
300 | struct fb_deferred_io *fbdefio; | |
301 | ||
302 | fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); | |
303 | ||
304 | if (fbdefio) { | |
305 | fbdefio->delay = DL_DEFIO_WRITE_DELAY; | |
306 | fbdefio->deferred_io = udlfb_dpy_deferred_io; | |
307 | } | |
308 | ||
309 | info->fbdefio = fbdefio; | |
310 | fb_deferred_io_init(info); | |
311 | } | |
312 | ||
313 | pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", | |
314 | info->node, user, info, ufbdev->fb_count); | |
315 | ||
316 | return 0; | |
317 | } | |
318 | ||
319 | ||
320 | /* | |
321 | * Assumes caller is holding info->lock mutex (for open and release at least) | |
322 | */ | |
323 | static int udl_fb_release(struct fb_info *info, int user) | |
324 | { | |
325 | struct udl_fbdev *ufbdev = info->par; | |
326 | ||
327 | ufbdev->fb_count--; | |
328 | ||
329 | if ((ufbdev->fb_count == 0) && (info->fbdefio)) { | |
330 | fb_deferred_io_cleanup(info); | |
331 | kfree(info->fbdefio); | |
332 | info->fbdefio = NULL; | |
333 | info->fbops->fb_mmap = udl_fb_mmap; | |
334 | } | |
335 | ||
336 | pr_warn("released /dev/fb%d user=%d count=%d\n", | |
337 | info->node, user, ufbdev->fb_count); | |
338 | ||
339 | return 0; | |
340 | } | |
341 | ||
/*
 * fbdev ops: the drawing hooks wrap the sys_* software renderers so the
 * resulting damage can be pushed over USB; modesetting, blanking and
 * cmap handling are delegated to the generic DRM fb helper.
 */
static struct fb_ops udlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = udl_fb_fillrect,
	.fb_copyarea = udl_fb_copyarea,
	.fb_imageblit = udl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
	.fb_mmap = udl_fb_mmap,
	.fb_open = udl_fb_open,
	.fb_release = udl_fb_release,
};
358 | ||
/* No gamma handling is implemented; setting is a no-op. */
static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
				  u16 blue, int regno)
{
}
363 | ||
27796b46 | 364 | static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
5320918b DA |
365 | u16 *blue, int regno) |
366 | { | |
367 | *red = 0; | |
368 | *green = 0; | |
369 | *blue = 0; | |
370 | } | |
371 | ||
/*
 * DRM dirty-fb hook: flush a set of userspace clip rectangles to the
 * device.  For framebuffers imported through PRIME the CPU reads are
 * bracketed with dma_buf_{begin,end}_cpu_access so the exporter can
 * maintain cache coherency.
 */
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned flags, unsigned color,
				      struct drm_clip_rect *clips,
				      unsigned num_clips)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);
	int i;
	int ret = 0;

	/* fb is not being scanned out: nothing to flush */
	if (!ufb->active_16)
		return 0;

	if (ufb->obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
					       0, ufb->obj->base.size,
					       DMA_FROM_DEVICE);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_clips; i++) {
		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
					clips[i].x2 - clips[i].x1,
					clips[i].y2 - clips[i].y1);
		if (ret)
			break;
	}

	/* end_cpu_access must run even when a damage call failed above */
	if (ufb->obj->base.import_attach) {
		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
				       0, ufb->obj->base.size,
				       DMA_FROM_DEVICE);
	}
	return ret;
}
408 | ||
409 | static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) | |
410 | { | |
411 | struct udl_framebuffer *ufb = to_udl_fb(fb); | |
412 | ||
413 | if (ufb->obj) | |
414 | drm_gem_object_unreference_unlocked(&ufb->obj->base); | |
415 | ||
416 | drm_framebuffer_cleanup(fb); | |
417 | kfree(ufb); | |
418 | } | |
419 | ||
/* Framebuffer vtable; create_handle is NULL (no handle export here). */
static const struct drm_framebuffer_funcs udlfb_funcs = {
	.destroy = udl_user_framebuffer_destroy,
	.dirty = udl_user_framebuffer_dirty,
	.create_handle = NULL,
};
425 | ||
426 | ||
/*
 * Wrap a GEM object in a udl_framebuffer and register it with the core.
 * NOTE(review): drm_helper_mode_fill_fb_struct runs even when
 * drm_framebuffer_init failed; callers treat a non-zero return as fatal
 * so this appears harmless — confirm before reordering.
 */
static int
udl_framebuffer_init(struct drm_device *dev,
		     struct udl_framebuffer *ufb,
		     struct drm_mode_fb_cmd2 *mode_cmd,
		     struct udl_gem_object *obj)
{
	int ret;

	ufb->obj = obj;
	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
	return ret;
}
440 | ||
441 | ||
442 | static int udlfb_create(struct udl_fbdev *ufbdev, | |
443 | struct drm_fb_helper_surface_size *sizes) | |
444 | { | |
445 | struct drm_device *dev = ufbdev->helper.dev; | |
446 | struct fb_info *info; | |
447 | struct device *device = &dev->usbdev->dev; | |
448 | struct drm_framebuffer *fb; | |
449 | struct drm_mode_fb_cmd2 mode_cmd; | |
450 | struct udl_gem_object *obj; | |
451 | uint32_t size; | |
452 | int ret = 0; | |
453 | ||
454 | if (sizes->surface_bpp == 24) | |
455 | sizes->surface_bpp = 32; | |
456 | ||
457 | mode_cmd.width = sizes->surface_width; | |
458 | mode_cmd.height = sizes->surface_height; | |
459 | mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); | |
460 | ||
461 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | |
462 | sizes->surface_depth); | |
463 | ||
464 | size = mode_cmd.pitches[0] * mode_cmd.height; | |
465 | size = ALIGN(size, PAGE_SIZE); | |
466 | ||
467 | obj = udl_gem_alloc_object(dev, size); | |
468 | if (!obj) | |
469 | goto out; | |
470 | ||
471 | ret = udl_gem_vmap(obj); | |
472 | if (ret) { | |
473 | DRM_ERROR("failed to vmap fb\n"); | |
474 | goto out_gfree; | |
475 | } | |
476 | ||
477 | info = framebuffer_alloc(0, device); | |
478 | if (!info) { | |
479 | ret = -ENOMEM; | |
480 | goto out_gfree; | |
481 | } | |
482 | info->par = ufbdev; | |
483 | ||
484 | ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj); | |
485 | if (ret) | |
486 | goto out_gfree; | |
487 | ||
488 | fb = &ufbdev->ufb.base; | |
489 | ||
490 | ufbdev->helper.fb = fb; | |
491 | ufbdev->helper.fbdev = info; | |
492 | ||
493 | strcpy(info->fix.id, "udldrmfb"); | |
494 | ||
495 | info->screen_base = ufbdev->ufb.obj->vmapping; | |
496 | info->fix.smem_len = size; | |
497 | info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping; | |
498 | ||
499 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | |
500 | info->fbops = &udlfb_ops; | |
501 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | |
502 | drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height); | |
503 | ||
504 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | |
505 | if (ret) { | |
506 | ret = -ENOMEM; | |
507 | goto out_gfree; | |
508 | } | |
509 | ||
510 | ||
511 | DRM_DEBUG_KMS("allocated %dx%d vmal %p\n", | |
512 | fb->width, fb->height, | |
513 | ufbdev->ufb.obj->vmapping); | |
514 | ||
515 | return ret; | |
516 | out_gfree: | |
517 | drm_gem_object_unreference(&ufbdev->ufb.obj->base); | |
518 | out: | |
519 | return ret; | |
520 | } | |
521 | ||
522 | static int udl_fb_find_or_create_single(struct drm_fb_helper *helper, | |
523 | struct drm_fb_helper_surface_size *sizes) | |
524 | { | |
525 | struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper; | |
526 | int new_fb = 0; | |
527 | int ret; | |
528 | ||
529 | if (!helper->fb) { | |
530 | ret = udlfb_create(ufbdev, sizes); | |
531 | if (ret) | |
532 | return ret; | |
533 | ||
534 | new_fb = 1; | |
535 | } | |
536 | return new_fb; | |
537 | } | |
538 | ||
/* fb helper callbacks; gamma hooks are stubs (no hardware gamma). */
static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
	.gamma_set = udl_crtc_fb_gamma_set,
	.gamma_get = udl_crtc_fb_gamma_get,
	.fb_probe = udl_fb_find_or_create_single,
};
544 | ||
545 | static void udl_fbdev_destroy(struct drm_device *dev, | |
546 | struct udl_fbdev *ufbdev) | |
547 | { | |
548 | struct fb_info *info; | |
549 | if (ufbdev->helper.fbdev) { | |
550 | info = ufbdev->helper.fbdev; | |
551 | unregister_framebuffer(info); | |
552 | if (info->cmap.len) | |
553 | fb_dealloc_cmap(&info->cmap); | |
554 | framebuffer_release(info); | |
555 | } | |
556 | drm_fb_helper_fini(&ufbdev->helper); | |
557 | drm_framebuffer_cleanup(&ufbdev->ufb.base); | |
558 | drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); | |
559 | } | |
560 | ||
561 | int udl_fbdev_init(struct drm_device *dev) | |
562 | { | |
563 | struct udl_device *udl = dev->dev_private; | |
564 | int bpp_sel = fb_bpp; | |
565 | struct udl_fbdev *ufbdev; | |
566 | int ret; | |
567 | ||
568 | ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL); | |
569 | if (!ufbdev) | |
570 | return -ENOMEM; | |
571 | ||
572 | udl->fbdev = ufbdev; | |
573 | ufbdev->helper.funcs = &udl_fb_helper_funcs; | |
574 | ||
575 | ret = drm_fb_helper_init(dev, &ufbdev->helper, | |
576 | 1, 1); | |
577 | if (ret) { | |
578 | kfree(ufbdev); | |
579 | return ret; | |
580 | ||
581 | } | |
582 | ||
583 | drm_fb_helper_single_add_all_connectors(&ufbdev->helper); | |
584 | drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel); | |
585 | return 0; | |
586 | } | |
587 | ||
588 | void udl_fbdev_cleanup(struct drm_device *dev) | |
589 | { | |
590 | struct udl_device *udl = dev->dev_private; | |
591 | if (!udl->fbdev) | |
592 | return; | |
593 | ||
594 | udl_fbdev_destroy(dev, udl->fbdev); | |
595 | kfree(udl->fbdev); | |
596 | udl->fbdev = NULL; | |
597 | } | |
598 | ||
599 | void udl_fbdev_unplug(struct drm_device *dev) | |
600 | { | |
601 | struct udl_device *udl = dev->dev_private; | |
602 | struct udl_fbdev *ufbdev; | |
603 | if (!udl->fbdev) | |
604 | return; | |
605 | ||
606 | ufbdev = udl->fbdev; | |
607 | if (ufbdev->helper.fbdev) { | |
608 | struct fb_info *info; | |
609 | info = ufbdev->helper.fbdev; | |
610 | unlink_framebuffer(info); | |
611 | } | |
612 | } | |
613 | ||
614 | struct drm_framebuffer * | |
615 | udl_fb_user_fb_create(struct drm_device *dev, | |
616 | struct drm_file *file, | |
617 | struct drm_mode_fb_cmd2 *mode_cmd) | |
618 | { | |
619 | struct drm_gem_object *obj; | |
620 | struct udl_framebuffer *ufb; | |
621 | int ret; | |
96503f59 | 622 | uint32_t size; |
5320918b DA |
623 | |
624 | obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]); | |
625 | if (obj == NULL) | |
626 | return ERR_PTR(-ENOENT); | |
627 | ||
96503f59 DA |
628 | size = mode_cmd->pitches[0] * mode_cmd->height; |
629 | size = ALIGN(size, PAGE_SIZE); | |
630 | ||
631 | if (size > obj->size) { | |
632 | DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height); | |
633 | return ERR_PTR(-ENOMEM); | |
634 | } | |
635 | ||
5320918b DA |
636 | ufb = kzalloc(sizeof(*ufb), GFP_KERNEL); |
637 | if (ufb == NULL) | |
638 | return ERR_PTR(-ENOMEM); | |
639 | ||
640 | ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj)); | |
641 | if (ret) { | |
642 | kfree(ufb); | |
643 | return ERR_PTR(-EINVAL); | |
644 | } | |
645 | return &ufb->base; | |
646 | } |