drivers/gpu/drm/radeon/radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                radeon_bo_unref(&robj);
        }
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                             int alignment, int initial_domain,
                             u32 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }

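        /* On a VRAM allocation failure (other than an interrupted wait),
         * widen the domain to include GTT and retry once before giving up. */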
retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        long r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to become idle */
                r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
                if (!r)
                        r = -EBUSY;

                if (r < 0 && r != -EINTR) {
                        printk(KERN_ERR "Failed to wait for object: %li\n", r);
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

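        /* each open file handle holds a reference on the per-VM bo_va; the
         * mapping is only torn down when the count drops to zero in
         * radeon_gem_object_close() */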
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

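/*
 * -EDEADLK from a wait means the GPU is hung: attempt a reset and, if it
 * succeeds, return -EAGAIN so the caller knows to retry the ioctl.
 */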
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;

        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

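/*
 * Object creation runs under the read side of exclusive_lock: many ioctls
 * may allocate concurrently, while a GPU reset takes the write side to
 * exclude them all.
 */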
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *bo;
        uint32_t handle;
        int r;

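        /* both the user address and the size must be page aligned; checking
         * the OR of the two catches a misalignment in either one */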
        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
            RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
            RADEON_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                /* readonly pages not tested on older hardware */
                if (rdev->family < CHIP_R600)
                        return -EINVAL;

        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must require anonymous
                   memory and install an MMU notifier */
                return -EACCES;
        }

        down_read(&rdev->exclusive_lock);

        /* create a gem object to contain this object in */
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_radeon_bo(gobj);
        r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
                r = radeon_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

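        /* mmap_sem is held across the validation so the user mapping cannot
         * be changed or torn down while the pages are pinned into GTT */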
        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = radeon_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                radeon_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);

        return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        /* use rdev, not the BO, after the reference is dropped; the object
         * may be freed here */
        drm_gem_object_unreference_unlocked(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;
        uint32_t cur_placement = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
        if (ret == 0)
                r = -EBUSY;
        else if (ret < 0)
                r = ret;

        /* Flush HDP cache via MMIO if necessary */
        cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
                                    struct radeon_bo_va *bo_va)
{
        struct ttm_validate_buffer tv, *entry;
        struct radeon_bo_list *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);

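        /* reserve the BO itself together with every BO the VM page tables
         * use; a shared slot is enough since the BO is only read here */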
        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
        if (!vm_bos)
                return;

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;

        list_for_each_entry(entry, &list, head) {
                domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out, don't swap it in here;
                   just abort and wait for the next CS */
                if (domain == RADEON_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);
        if (r)
                goto error_unlock;

        if (bo_va->it.start)
                r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
        mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_free:
        drm_free_large(vm_bos);

        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that way
         * we can later use those fields without breaking existing userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

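        /* note: radeon_vm_bo_set_addr() drops the reservation on rbo itself,
         * so only the early-out path below unreserves explicitly */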
        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        radeon_bo_unreserve(rbo);
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        if (!r)
                radeon_gem_va_update_vm(rdev, bo_va);
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
                goto out;

        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        radeon_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

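        /* convert bpp to whole bytes per pixel, let the hardware dictate the
         * pitch alignment, and round the total size up to a full page */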
        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
        {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
        return 0;
}