Commit | Line | Data |
---|---|---|
0a6659bd GH |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | |
3 | * it under the terms of the GNU General Public License as published by | |
4 | * the Free Software Foundation; either version 2 of the License, or | |
5 | * (at your option) any later version. | |
6 | */ | |
7 | ||
8 | #include "bochs.h" | |
9 | ||
10 | static void bochs_ttm_placement(struct bochs_bo *bo, int domain); | |
11 | ||
12 | /* ---------------------------------------------------------------------- */ | |
13 | ||
14 | static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd) | |
15 | { | |
16 | return container_of(bd, struct bochs_device, ttm.bdev); | |
17 | } | |
18 | ||
19 | static int bochs_ttm_mem_global_init(struct drm_global_reference *ref) | |
20 | { | |
21 | return ttm_mem_global_init(ref->object); | |
22 | } | |
23 | ||
24 | static void bochs_ttm_mem_global_release(struct drm_global_reference *ref) | |
25 | { | |
26 | ttm_mem_global_release(ref->object); | |
27 | } | |
28 | ||
29 | static int bochs_ttm_global_init(struct bochs_device *bochs) | |
30 | { | |
31 | struct drm_global_reference *global_ref; | |
32 | int r; | |
33 | ||
34 | global_ref = &bochs->ttm.mem_global_ref; | |
35 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | |
36 | global_ref->size = sizeof(struct ttm_mem_global); | |
37 | global_ref->init = &bochs_ttm_mem_global_init; | |
38 | global_ref->release = &bochs_ttm_mem_global_release; | |
39 | r = drm_global_item_ref(global_ref); | |
40 | if (r != 0) { | |
41 | DRM_ERROR("Failed setting up TTM memory accounting " | |
42 | "subsystem.\n"); | |
43 | return r; | |
44 | } | |
45 | ||
46 | bochs->ttm.bo_global_ref.mem_glob = | |
47 | bochs->ttm.mem_global_ref.object; | |
48 | global_ref = &bochs->ttm.bo_global_ref.ref; | |
49 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | |
50 | global_ref->size = sizeof(struct ttm_bo_global); | |
51 | global_ref->init = &ttm_bo_global_init; | |
52 | global_ref->release = &ttm_bo_global_release; | |
53 | r = drm_global_item_ref(global_ref); | |
54 | if (r != 0) { | |
55 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | |
56 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | |
57 | return r; | |
58 | } | |
59 | ||
60 | return 0; | |
61 | } | |
62 | ||
63 | static void bochs_ttm_global_release(struct bochs_device *bochs) | |
64 | { | |
65 | if (bochs->ttm.mem_global_ref.release == NULL) | |
66 | return; | |
67 | ||
68 | drm_global_item_unref(&bochs->ttm.bo_global_ref.ref); | |
69 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | |
70 | bochs->ttm.mem_global_ref.release = NULL; | |
71 | } | |
72 | ||
73 | ||
74 | static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo) | |
75 | { | |
76 | struct bochs_bo *bo; | |
77 | ||
78 | bo = container_of(tbo, struct bochs_bo, bo); | |
79 | drm_gem_object_release(&bo->gem); | |
80 | kfree(bo); | |
81 | } | |
82 | ||
83 | static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo) | |
84 | { | |
85 | if (bo->destroy == &bochs_bo_ttm_destroy) | |
86 | return true; | |
87 | return false; | |
88 | } | |
89 | ||
/*
 * Describe the memory domains this driver supports: plain system RAM
 * and the fixed, CPU-mappable VRAM aperture managed by the TTM range
 * allocator.  Returns -EINVAL for any other memory type.
 */
static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				  struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory: mappable, any caching allowed. */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* Fixed VRAM BAR; write-combined by default. */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
113 | ||
114 | static void | |
115 | bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |
116 | { | |
117 | struct bochs_bo *bochsbo = bochs_bo(bo); | |
118 | ||
119 | if (!bochs_ttm_bo_is_bochs_bo(bo)) | |
120 | return; | |
121 | ||
122 | bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM); | |
123 | *pl = bochsbo->placement; | |
124 | } | |
125 | ||
126 | static int bochs_bo_verify_access(struct ttm_buffer_object *bo, | |
127 | struct file *filp) | |
128 | { | |
129 | struct bochs_bo *bochsbo = bochs_bo(bo); | |
130 | ||
131 | return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp); | |
132 | } | |
133 | ||
134 | static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev, | |
135 | struct ttm_mem_reg *mem) | |
136 | { | |
137 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | |
138 | struct bochs_device *bochs = bochs_bdev(bdev); | |
139 | ||
140 | mem->bus.addr = NULL; | |
141 | mem->bus.offset = 0; | |
142 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | |
143 | mem->bus.base = 0; | |
144 | mem->bus.is_iomem = false; | |
145 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | |
146 | return -EINVAL; | |
147 | switch (mem->mem_type) { | |
148 | case TTM_PL_SYSTEM: | |
149 | /* system memory */ | |
150 | return 0; | |
151 | case TTM_PL_VRAM: | |
152 | mem->bus.offset = mem->start << PAGE_SHIFT; | |
153 | mem->bus.base = bochs->fb_base; | |
154 | mem->bus.is_iomem = true; | |
155 | break; | |
156 | default: | |
157 | return -EINVAL; | |
158 | break; | |
159 | } | |
160 | return 0; | |
161 | } | |
162 | ||
/* Nothing to undo: io_mem_reserve() above allocates no resources. */
static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
}
167 | ||
/*
 * Move a BO between memory domains.  This virtual hardware has no DMA
 * engine, so every move is done with a CPU memcpy.
 */
static int bochs_bo_move(struct ttm_buffer_object *bo,
			 bool evict, bool interruptible,
			 bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
175 | ||
176 | ||
/* Tear down a ttm_tt allocated in bochs_ttm_tt_create(). */
static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}
182 | ||
/* Backend ops for our ttm_tt objects; only destruction is non-default. */
static struct ttm_backend_func bochs_tt_backend_func = {
	.destroy = &bochs_ttm_backend_destroy,
};
186 | ||
187 | static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev, | |
188 | unsigned long size, | |
189 | uint32_t page_flags, | |
190 | struct page *dummy_read_page) | |
191 | { | |
192 | struct ttm_tt *tt; | |
193 | ||
194 | tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); | |
195 | if (tt == NULL) | |
196 | return NULL; | |
197 | tt->func = &bochs_tt_backend_func; | |
198 | if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { | |
199 | kfree(tt); | |
200 | return NULL; | |
201 | } | |
202 | return tt; | |
203 | } | |
204 | ||
/* Driver vtable wiring the bochs hooks into the TTM core. */
struct ttm_bo_driver bochs_bo_driver = {
	.ttm_tt_create = bochs_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bochs_bo_init_mem_type,
	.evict_flags = bochs_bo_evict_flags,
	.move = bochs_bo_move,
	.verify_access = bochs_bo_verify_access,
	.io_mem_reserve = &bochs_ttm_io_mem_reserve,
	.io_mem_free = &bochs_ttm_io_mem_free,
	/* stock LRU handling; no driver-specific ordering needed */
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
218 | ||
219 | int bochs_mm_init(struct bochs_device *bochs) | |
220 | { | |
221 | struct ttm_bo_device *bdev = &bochs->ttm.bdev; | |
222 | int ret; | |
223 | ||
224 | ret = bochs_ttm_global_init(bochs); | |
225 | if (ret) | |
226 | return ret; | |
227 | ||
228 | ret = ttm_bo_device_init(&bochs->ttm.bdev, | |
229 | bochs->ttm.bo_global_ref.ref.object, | |
44d847b7 DH |
230 | &bochs_bo_driver, |
231 | bochs->dev->anon_inode->i_mapping, | |
232 | DRM_FILE_PAGE_OFFSET, | |
0a6659bd GH |
233 | true); |
234 | if (ret) { | |
235 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | |
236 | return ret; | |
237 | } | |
238 | ||
239 | ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, | |
240 | bochs->fb_size >> PAGE_SHIFT); | |
241 | if (ret) { | |
242 | DRM_ERROR("Failed ttm VRAM init: %d\n", ret); | |
243 | return ret; | |
244 | } | |
245 | ||
246 | bochs->ttm.initialized = true; | |
247 | return 0; | |
248 | } | |
249 | ||
/* Tear down TTM state; a no-op unless bochs_mm_init() completed. */
void bochs_mm_fini(struct bochs_device *bochs)
{
	if (!bochs->ttm.initialized)
		return;

	ttm_bo_device_release(&bochs->ttm.bdev);
	bochs_ttm_global_release(bochs);
	bochs->ttm.initialized = false;
}
259 | ||
260 | static void bochs_ttm_placement(struct bochs_bo *bo, int domain) | |
261 | { | |
f1217ed0 | 262 | unsigned i; |
0a6659bd | 263 | u32 c = 0; |
0a6659bd GH |
264 | bo->placement.placement = bo->placements; |
265 | bo->placement.busy_placement = bo->placements; | |
266 | if (domain & TTM_PL_FLAG_VRAM) { | |
f1217ed0 CK |
267 | bo->placements[c++].flags = TTM_PL_FLAG_WC |
268 | | TTM_PL_FLAG_UNCACHED | |
0a6659bd GH |
269 | | TTM_PL_FLAG_VRAM; |
270 | } | |
271 | if (domain & TTM_PL_FLAG_SYSTEM) { | |
f1217ed0 CK |
272 | bo->placements[c++].flags = TTM_PL_MASK_CACHING |
273 | | TTM_PL_FLAG_SYSTEM; | |
0a6659bd GH |
274 | } |
275 | if (!c) { | |
f1217ed0 CK |
276 | bo->placements[c++].flags = TTM_PL_MASK_CACHING |
277 | | TTM_PL_FLAG_SYSTEM; | |
278 | } | |
279 | for (i = 0; i < c; ++i) { | |
280 | bo->placements[i].fpfn = 0; | |
281 | bo->placements[i].lpfn = 0; | |
0a6659bd GH |
282 | } |
283 | bo->placement.num_placement = c; | |
284 | bo->placement.num_busy_placement = c; | |
285 | } | |
286 | ||
/* Offset of the BO within its memory domain, as placed by TTM. */
static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
{
	return bo->bo.offset;
}
291 | ||
/*
 * Pin a BO into the domain given by @pl_flag so it cannot be evicted.
 * Pins nest: only the first call actually (re)validates the BO; later
 * calls just bump the count.  If @gpu_addr is non-NULL it receives the
 * BO's domain offset.  Returns 0 or a ttm_bo_validate() error.
 */
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		/* Already resident; just bump the nesting count. */
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = bochs_bo_gpu_offset(bo);
		return 0;
	}

	bochs_ttm_placement(bo, pl_flag);
	/* Mark every candidate placement non-evictable before validating. */
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = bochs_bo_gpu_offset(bo);
	return 0;
}
315 | ||
/*
 * Drop one pin reference.  Only when the count reaches zero is the
 * NO_EVICT flag cleared and the BO revalidated as evictable.
 * Unbalanced unpins are logged and ignored.
 */
int bochs_bo_unpin(struct bochs_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;

	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}
337 | ||
/*
 * mmap entry point: hand valid TTM offsets to ttm_bo_mmap().
 * Offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps and are
 * rejected.
 */
int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct bochs_device *bochs;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	bochs = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
}
350 | ||
351 | /* ---------------------------------------------------------------------- */ | |
352 | ||
/*
 * Allocate a bochs_bo: a GEM object backed by a TTM buffer object.
 * On success the new BO is returned through @pbochsbo.
 *
 * Once ttm_bo_init() is reached, its failure path invokes the destroy
 * callback (bochs_bo_ttm_destroy), which releases the GEM object and
 * frees the memory - hence no explicit cleanup after that call.
 */
static int bochs_bo_create(struct drm_device *dev, int size, int align,
			   uint32_t flags, struct bochs_bo **pbochsbo)
{
	struct bochs_device *bochs = dev->dev_private;
	struct bochs_bo *bochsbo;
	size_t acc_size;
	int ret;

	bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
	if (!bochsbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &bochsbo->gem, size);
	if (ret) {
		kfree(bochsbo);
		return ret;
	}

	bochsbo->bo.bdev = &bochs->ttm.bdev;
	bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;

	/* Allow placement in VRAM with fallback to system memory. */
	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
				       sizeof(struct bochs_bo));

	ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
			  ttm_bo_type_device, &bochsbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, NULL, bochs_bo_ttm_destroy);
	if (ret)
		return ret;

	*pbochsbo = bochsbo;
	return 0;
}
389 | ||
390 | int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel, | |
391 | struct drm_gem_object **obj) | |
392 | { | |
393 | struct bochs_bo *bochsbo; | |
394 | int ret; | |
395 | ||
396 | *obj = NULL; | |
397 | ||
a8ba29cd | 398 | size = PAGE_ALIGN(size); |
0a6659bd GH |
399 | if (size == 0) |
400 | return -EINVAL; | |
401 | ||
402 | ret = bochs_bo_create(dev, size, 0, 0, &bochsbo); | |
403 | if (ret) { | |
404 | if (ret != -ERESTARTSYS) | |
405 | DRM_ERROR("failed to allocate GEM object\n"); | |
406 | return ret; | |
407 | } | |
408 | *obj = &bochsbo->gem; | |
409 | return 0; | |
410 | } | |
411 | ||
412 | int bochs_dumb_create(struct drm_file *file, struct drm_device *dev, | |
413 | struct drm_mode_create_dumb *args) | |
414 | { | |
415 | struct drm_gem_object *gobj; | |
416 | u32 handle; | |
417 | int ret; | |
418 | ||
419 | args->pitch = args->width * ((args->bpp + 7) / 8); | |
420 | args->size = args->pitch * args->height; | |
421 | ||
422 | ret = bochs_gem_create(dev, args->size, false, | |
423 | &gobj); | |
424 | if (ret) | |
425 | return ret; | |
426 | ||
427 | ret = drm_gem_handle_create(file, gobj, &handle); | |
428 | drm_gem_object_unreference_unlocked(gobj); | |
429 | if (ret) | |
430 | return ret; | |
431 | ||
432 | args->handle = handle; | |
433 | return 0; | |
434 | } | |
435 | ||
436 | static void bochs_bo_unref(struct bochs_bo **bo) | |
437 | { | |
438 | struct ttm_buffer_object *tbo; | |
439 | ||
440 | if ((*bo) == NULL) | |
441 | return; | |
442 | ||
443 | tbo = &((*bo)->bo); | |
444 | ttm_bo_unref(&tbo); | |
dcb1ee57 | 445 | *bo = NULL; |
0a6659bd GH |
446 | } |
447 | ||
/* GEM free callback: drop the TTM reference backing this object. */
void bochs_gem_free_object(struct drm_gem_object *obj)
{
	struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);

	bochs_bo_unref(&bochs_bo);
}
454 | ||
455 | int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, | |
456 | uint32_t handle, uint64_t *offset) | |
457 | { | |
458 | struct drm_gem_object *obj; | |
0a6659bd GH |
459 | struct bochs_bo *bo; |
460 | ||
a8ad0bd8 | 461 | obj = drm_gem_object_lookup(file, handle); |
37ae75c8 DV |
462 | if (obj == NULL) |
463 | return -ENOENT; | |
0a6659bd GH |
464 | |
465 | bo = gem_to_bochs_bo(obj); | |
466 | *offset = bochs_bo_mmap_offset(bo); | |
467 | ||
37ae75c8 DV |
468 | drm_gem_object_unreference_unlocked(obj); |
469 | return 0; | |
0a6659bd GH |
470 | } |
471 | ||
472 | /* ---------------------------------------------------------------------- */ | |
473 | ||
/* Framebuffer destroy: drop the backing GEM reference, then free. */
static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
	if (bochs_fb->obj)
		drm_gem_object_unreference_unlocked(bochs_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(fb);
}
482 | ||
/* Framebuffer vtable; only destruction needs driver code. */
static const struct drm_framebuffer_funcs bochs_fb_funcs = {
	.destroy = bochs_user_framebuffer_destroy,
};
486 | ||
/*
 * Fill in and register a bochs_framebuffer wrapping @obj.
 * On success, ownership of the @obj reference passes to @gfb (it is
 * dropped again in bochs_user_framebuffer_destroy()).
 */
int bochs_framebuffer_init(struct drm_device *dev,
			   struct bochs_framebuffer *gfb,
			   const struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_gem_object *obj)
{
	int ret;

	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
	gfb->obj = obj;
	ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
	if (ret) {
		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
		return ret;
	}
	return 0;
}
503 | ||
504 | static struct drm_framebuffer * | |
505 | bochs_user_framebuffer_create(struct drm_device *dev, | |
506 | struct drm_file *filp, | |
1eb83451 | 507 | const struct drm_mode_fb_cmd2 *mode_cmd) |
0a6659bd GH |
508 | { |
509 | struct drm_gem_object *obj; | |
510 | struct bochs_framebuffer *bochs_fb; | |
511 | int ret; | |
512 | ||
513 | DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n", | |
514 | mode_cmd->width, mode_cmd->height, | |
515 | (mode_cmd->pixel_format) & 0xff, | |
516 | (mode_cmd->pixel_format >> 8) & 0xff, | |
517 | (mode_cmd->pixel_format >> 16) & 0xff, | |
518 | (mode_cmd->pixel_format >> 24) & 0xff); | |
519 | ||
520 | if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888) | |
521 | return ERR_PTR(-ENOENT); | |
522 | ||
a8ad0bd8 | 523 | obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); |
0a6659bd GH |
524 | if (obj == NULL) |
525 | return ERR_PTR(-ENOENT); | |
526 | ||
527 | bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL); | |
528 | if (!bochs_fb) { | |
529 | drm_gem_object_unreference_unlocked(obj); | |
530 | return ERR_PTR(-ENOMEM); | |
531 | } | |
532 | ||
533 | ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj); | |
534 | if (ret) { | |
535 | drm_gem_object_unreference_unlocked(obj); | |
536 | kfree(bochs_fb); | |
537 | return ERR_PTR(ret); | |
538 | } | |
539 | return &bochs_fb->base; | |
540 | } | |
541 | ||
/* Mode-config vtable: only user framebuffer creation is needed. */
const struct drm_mode_config_funcs bochs_mode_funcs = {
	.fb_create = bochs_user_framebuffer_create,
};