/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
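/*
 * Free callback for radeon GEM objects: tears down any PRIME import
 * attachment before dropping the driver's reference on the backing bo.
 */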
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                radeon_bo_unref(&robj);
        }
}
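/*
 * Creates the backing radeon_bo for a GEM object. Allocation is capped at
 * the unpinned GTT size, and a failed VRAM allocation is retried with GTT
 * added to the domain mask before giving up.
 */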
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                             int alignment, int initial_domain,
                             u32 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                             flags, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access wait for object idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object!\n");
                        return r;
                }
        }
        return 0;
}
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}
void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}
/*
 * Called from drm_gem_handle_create, which is reached from both the new
 * and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}
void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}
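/*
 * If an ioctl failed because of a GPU lockup (-EDEADLK), try a GPU reset
 * and ask userspace to retry the ioctl by converting the error to -EAGAIN.
 */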
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}
/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
        return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}
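/*
 * Object creation runs under the read side of rdev->exclusive_lock, so it
 * cannot race with a GPU reset, which takes the lock for writing.
 */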
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}
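/*
 * Wraps a range of user memory in a GEM object. Read-only mappings need at
 * least an R600; writable mappings must be anonymous memory with an MMU
 * notifier registered so the pages can be tracked while mapped.
 */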
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
            RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
            RADEON_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                /* readonly pages not tested on older hardware */
                if (rdev->family < CHIP_R600)
                        return -EINVAL;

        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must require anonymous
                   memory and install an MMU notifier */
                return -EACCES;
        }

        down_read(&rdev->exclusive_lock);

        /* create a gem object to contain this object in */
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_radeon_bo(gobj);
        r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
                r = radeon_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = radeon_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                radeon_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);

        return r;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
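/*
 * Non-blocking busy check: reports whether the bo still has GPU work
 * pending and which memory domain it currently lives in.
 */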
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}
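/*
 * Blocking wait for the bo to become idle. If the bo ended up in VRAM,
 * the HDP cache is flushed so CPU reads see what the GPU wrote.
 */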
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, false);
        /* Flush HDP cache via MMIO if necessary */
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
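/*
 * Maps or unmaps a bo in the GPU virtual address space of the calling
 * client; per-client VM is only available on Cayman and newer.
 */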
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that way,
         * moving forward, we can use those fields without breaking existing
         * userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to force userspace to set the snooped flag;
         * otherwise we will end up with broken userspace and won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        if (!r)
                args->operation = RADEON_VA_RESULT_OK;
        else
                args->operation = RADEON_VA_RESULT_ERROR;
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
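/*
 * Gets or sets the initial placement domain recorded for a bo; not
 * permitted on userptr objects.
 */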
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
                goto out;

        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        radeon_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
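/*
 * Dumb buffer allocation: the pitch is the aligned width in bytes, the size
 * is pitch * height rounded up to a whole page, and the bo is placed in
 * VRAM so it can be scanned out.
 */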
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
        {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
        return 0;
}