/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			/* fall back to GTT if the VRAM allocation failed */
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&robj->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return 0;
}
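/*
 * Example (illustrative sketch, not part of the original driver): a
 * kernel-side caller allocating a page-sized, CPU-accessible VRAM BO with
 * this helper; amdgpu_mode_dumb_create() below is the in-file user of the
 * same pattern. The size and error handling here are placeholders.
 *
 *	struct drm_gem_object *gobj;
 *	int r;
 *
 *	r = amdgpu_gem_object_create(adev, PAGE_SIZE, 0,
 *				     AMDGPU_GEM_DOMAIN_VRAM,
 *				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 *				     false, &gobj);
 *	if (r)
 *		return r;
 */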
int amdgpu_gem_init(struct amdgpu_device *adev)
{
	INIT_LIST_HEAD(&adev->gem.objects);
	return 0;
}
void amdgpu_gem_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_force_delete(adev);
}
/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * the open ioctl case.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		mutex_unlock(&vm->mutex);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
	mutex_unlock(&vm->mutex);

	return 0;
}
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		mutex_unlock(&vm->mutex);
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		/* drop the VM mapping once the last handle is gone */
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	amdgpu_bo_unreserve(rbo);
	mutex_unlock(&vm->mutex);
}
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
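/*
 * Example (illustrative sketch): driving this ioctl from userspace with
 * libdrm's drmCommandWriteRead(). "fd" is assumed to be an open amdgpu
 * render node; error handling is omitted.
 *
 *	union drm_amdgpu_gem_create args = {};
 *	uint32_t handle;
 *
 *	args.in.bo_size = 1 << 20;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		handle = args.out.handle;
 */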
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	/* the address and size must be page aligned */
	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
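/*
 * Example (illustrative sketch): mapping a BO from userspace with the fake
 * offset this ioctl returns. "fd", "handle" and "bo_size" are assumed to
 * come from a prior DRM_AMDGPU_GEM_CREATE call; error handling is omitted.
 *
 *	union drm_amdgpu_gem_mmap args = {};
 *	void *ptr;
 *
 *	args.in.handle = handle;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_GEM_MMAP, &args, sizeof(args));
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.out.addr_ptr);
 */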
/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
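/*
 * Worked example (assuming HZ = 250, i.e. one jiffy = 4 ms): for an
 * absolute timeout 500 ms in the future, ktime_sub() leaves roughly
 * 500000000 ns and nsecs_to_jiffies() yields about 125 jiffies. An
 * absolute timeout already in the past gives a negative delta, so the
 * function returns 0 and the caller only polls instead of sleeping.
 */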
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	mutex_lock(&fpriv->vm.mutex);
	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		mutex_unlock(&fpriv->vm.mutex);
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		amdgpu_bo_unreserve(rbo);
		mutex_unlock(&fpriv->vm.mutex);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}

	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
	mutex_unlock(&fpriv->vm.mutex);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
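/*
 * Example (illustrative sketch): mapping a BO at a GPU virtual address
 * from userspace. "fd", "handle", "va" and "bo_size" are assumed to be
 * set up by the caller; the ioctl is write-only, so drmCommandWrite()
 * suffices and error handling is omitted.
 *
 *	struct drm_amdgpu_gem_va args = {};
 *
 *	args.handle = handle;
 *	args.operation = AMDGPU_VA_OP_MAP;
 *	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *	args.va_address = va;
 *	args.offset_in_bo = 0;
 *	args.map_size = bo_size;
 *	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &args, sizeof(args));
 */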
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->initial_domain;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
						      AMDGPU_GEM_DOMAIN_GTT |
						      AMDGPU_GEM_DOMAIN_CPU);
		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}