drm/radeon: allow userptr write access under certain conditions
drivers/gpu/drm/radeon/radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

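/**
 * radeon_gem_object_free - free a GEM object backed by a radeon BO
 * @gobj: GEM object to free
 *
 * Drops the BO reference; for imported PRIME buffers the dma-buf
 * attachment is torn down first.
 */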
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

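/**
 * radeon_gem_object_create - allocate a BO and wrap it in a GEM object
 * @rdev: radeon device
 * @size: requested size in bytes
 * @alignment: alignment in bytes, raised to at least one page
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: whether the object is kernel-internal
 * @obj: where to store the new GEM object on success
 *
 * Allocations larger than the unpinned GTT size are rejected, a failed
 * VRAM allocation is retried with GTT added to the domain, and the new
 * object is put on the device's GEM object list.
 */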
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

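/**
 * radeon_gem_set_domain - validate a BO for the requested domain
 * @gobj: GEM object to transition
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domains. A CPU
 * domain request simply waits for the BO to become idle; requests
 * with no domain at all only trigger a warning.
 */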
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the GEM
 * create and open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

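/**
 * radeon_gem_object_close - drop the per-file VM mapping of a BO
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Drops the bo_va reference taken in radeon_gem_object_open() and
 * removes the mapping once the last reference is gone.
 */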
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

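/**
 * radeon_gem_handle_lockup - recover from a GPU lockup detected in a GEM path
 * @rdev: radeon device
 * @r: error code returned by the operation
 *
 * -EDEADLK signals a lockup: reset the GPU and, if the reset succeeds,
 * return -EAGAIN so userspace retries the call.
 */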
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
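/**
 * radeon_gem_info_ioctl - report memory pool sizes to userspace
 * @dev: DRM device
 * @data: struct drm_radeon_gem_info in/out argument
 * @filp: DRM file
 *
 * Reports total VRAM, visible VRAM and GART sizes, with the currently
 * pinned amounts subtracted from the visible VRAM and GART figures.
 */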
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

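/**
 * radeon_gem_create_ioctl - allocate a BO and return a handle for it
 * @dev: DRM device
 * @data: struct drm_radeon_gem_create in/out argument
 * @filp: DRM file the new handle is installed in
 *
 * The requested size is rounded up to a whole number of pages before
 * allocation; errors are funneled through radeon_gem_handle_lockup().
 */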
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

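/**
 * radeon_gem_userptr_ioctl - wrap anonymous user memory in a GEM object
 * @dev: DRM device
 * @data: struct drm_radeon_gem_userptr in/out argument
 * @filp: DRM file the new handle is installed in
 *
 * Read-only mappings are allowed on R600 and newer. Write access is
 * only granted when the pages are restricted to anonymous memory
 * (ANONONLY) and an MMU notifier is registered (REGISTER), so the
 * driver is told before the pages go away underneath it. VALIDATE
 * additionally binds the pages into the GTT up front, with mmap_sem
 * held to keep the mapping stable during validation.
 */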
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

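/**
 * radeon_gem_set_domain_ioctl - validate a BO into the requested domain
 * @dev: DRM device
 * @data: struct drm_radeon_gem_set_domain in/out argument
 * @filp: DRM file owning the handle
 *
 * Currently only waits for the BO to become idle when the CPU domain
 * is requested; see radeon_gem_set_domain().
 */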
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* don't touch the BO after the unreference; it may be gone */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

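/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 * @filp: DRM file owning the handle
 * @dev: DRM device
 * @handle: GEM handle to map
 * @offset_p: where to return the mmap offset
 *
 * Userptr BOs cannot be mapped through this path and return -EPERM.
 */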
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

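/**
 * radeon_gem_busy_ioctl - check whether a BO is still busy
 * @dev: DRM device
 * @data: struct drm_radeon_gem_busy in/out argument
 * @filp: DRM file owning the handle
 *
 * Does a non-blocking wait on the BO and reports its current
 * placement domain back to userspace.
 */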
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

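/**
 * radeon_gem_wait_idle_ioctl - block until a BO is idle
 * @dev: DRM device
 * @data: struct drm_radeon_gem_wait_idle in/out argument
 * @filp: DRM file owning the handle
 *
 * Waits for all pending work on the BO and, when the BO ended up in
 * VRAM, flushes the HDP cache via MMIO so the CPU sees coherent data.
 */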
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, false);
	/* Flush HDP cache via MMIO if necessary */
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

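/**
 * radeon_gem_va_ioctl - map or unmap a BO in the per-file GPU address space
 * @dev: DRM device
 * @data: struct drm_radeon_gem_va in/out argument
 * @filp: DRM file whose VM is modified
 *
 * Validates the requested operation, offset and flags, then maps the
 * BO at the given virtual address or tears the mapping down. The
 * result is also reported back through args->operation.
 */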
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way we can start using these fields later without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

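/**
 * radeon_gem_op_ioctl - get or set per-BO state
 * @dev: DRM device
 * @data: struct drm_radeon_gem_op in/out argument
 * @filp: DRM file owning the handle
 *
 * Supports querying and overriding the initial placement domain of a
 * BO. Userptr BOs are rejected with -EPERM.
 */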
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

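/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: DRM file the new handle is installed in
 * @dev: DRM device
 * @args: width, height and bpp in; pitch, size and handle out
 *
 * Computes an aligned pitch and page-aligned size, then allocates the
 * BO in VRAM.
 */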
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}