/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
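
/**
 * amdgpu_gem_object_create - allocate a GEM object backed by an amdgpu_bo
 *
 * @adev: amdgpu_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: initial placement domain(s) for the BO
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to store the resulting GEM object
 *
 * For normal domains the size is capped at the unpinned GTT size, since
 * the GTT backs VRAM to system pool migrations. A failed VRAM allocation
 * is retried with GTT added to the domain before giving up.
 */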
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&robj->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return 0;
}
int amdgpu_gem_init(struct amdgpu_device *adev)
{
	INIT_LIST_HEAD(&adev->gem.objects);
	return 0;
}
void amdgpu_gem_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_force_delete(adev);
}
/*
 * Called from drm_gem_handle_create, which appears in both the gem new
 * and open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);

	return 0;
}
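
/*
 * Called when a GEM handle is closed. Drops this file's reference on the
 * bo_va and removes the VA mapping once the last reference is gone.
 */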
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	amdgpu_bo_unreserve(rbo);
}
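
/*
 * Translate a lockup into a GPU reset attempt: a -EDEADLK result is
 * turned into -EAGAIN after a successful reset so userspace can retry.
 */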
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
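
/**
 * amdgpu_gem_create_ioctl - create a GEM object
 *
 * GDS, GWS and OA sizes arrive in hardware blocks and are converted to
 * bytes via their domain shift; all sizes are then rounded up to the
 * page size before the BO is allocated and a handle returned.
 */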
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	down_read(&adev->exclusive_lock);
	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	up_read(&adev->exclusive_lock);
	return 0;

error_unlock:
	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
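
/**
 * amdgpu_gem_userptr_ioctl - create a GEM object from user memory
 *
 * The address and size must be page aligned, and the mapping must be
 * backed by anonymous memory with an MMU notifier registered. With
 * AMDGPU_GEM_USERPTR_VALIDATE the pages are additionally bound to GTT
 * up front.
 */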
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&adev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&adev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}
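
/*
 * Look up a handle and return the fake mmap offset of its BO. Userptr
 * BOs and BOs created without CPU access cannot be mapped.
 */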
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned-to-signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
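
/*
 * Wait for a BO to become idle: a zero timeout only tests the BO's
 * reservation object, otherwise the wait is bounded by the computed
 * jiffies timeout.
 */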
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
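
/*
 * Get or set tiling flags and opaque metadata on a BO. The metadata
 * buffer is bounded by sizeof(args->data.data); larger set requests
 * are rejected.
 */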
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation to perform
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
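
/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the process VM
 *
 * Validates the requested virtual address and flags, translates the
 * page protection into PTE flags and applies the mapping. The VM is
 * updated immediately unless AMDGPU_VM_DELAY_UPDATE is set.
 */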
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			  AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		amdgpu_bo_unreserve(rbo);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}

	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
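
/*
 * GEM_OP ioctl: reports a BO's creation parameters back to userspace or
 * restricts the domains it may be placed in. Changing the placement of
 * userptr BOs is not allowed.
 */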
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->initial_domain;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
						      AMDGPU_GEM_DOMAIN_GTT |
						      AMDGPU_GEM_DOMAIN_CPU);
		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
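
/*
 * Dumb buffers are CPU-accessible VRAM BOs: the pitch is aligned to the
 * hardware requirement and the size rounded up to whole pages.
 */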
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}