/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPUs pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
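/*
 * Illustrative sketch of the typical lifecycle (not part of this file;
 * the surrounding submission context, ring and owner, is assumed): a
 * fence is emitted after the ring's commands, then waited on before the
 * buffers it protects are reused:
 *
 *	struct amdgpu_fence *fence;
 *
 *	if (amdgpu_fence_emit(ring, owner, &fence))
 *		return -ENOMEM;
 *	... submit further commands, unlock the ring ...
 *	fence_wait(&fence->base, false);
 *	fence_put(&fence->base);
 */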
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}
/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
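/*
 * Why queue_delayed_work() rather than mod_delayed_work(): queueing is a
 * no-op while the work item is still pending, so back-to-back calls arm
 * at most one lockup check per AMDGPU_FENCE_JIFFIES_TIMEOUT window,
 * whereas mod_delayed_work() would keep pushing the timer out on every
 * call.
 */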
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		&ring->fence_drv.fence_queue.lock,
		adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there need to
	 * be continuously new fences signaled, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process set as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after amdgpu_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}
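/*
 * Worked example of the 32-bit extension above (made-up values): with
 * last_seq = 0x00000001fffffff0 and a hardware read of 0x00000005,
 * OR-ing in the upper bits of last_seq gives 0x0000000100000005, which
 * is < last_seq, so the 32-bit counter must have wrapped; taking the
 * upper bits from last_emitted (say 0x0000000200000010) yields the
 * correct 64-bit value 0x0000000200000005.
 */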
/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware for a lockup.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				lockup_work.work);
	ring = fence_drv->ring;

	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}
/**
 * amdgpu_fence_ring_wait_seq - wait for seq of the specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * Returns:
 * 0: seq signaled, and gpu not hung
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	bool signaled = false;

	BUG_ON(!ring);
	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	amdgpu_fence_schedule_check(ring);
	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));

	if (signaled)
		return 0;
	else
		return -EDEADLK;
}
/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32bits wrap around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
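/*
 * Example of the arithmetic above (made-up values): with
 * sync_seq[ring->idx] = 105 and last_seq = 100, five fences have been
 * emitted but not yet signaled, so 5 is returned; the clamp to
 * 0x10000000 only matters when the two counters have diverged wildly,
 * e.g. around a sequence wrap.
 */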
/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}
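/*
 * Example of the check above (made-up values): a fence with seq 42 on
 * ring 0 is tested against dst_ring 1. If dst_ring 1 has only synced to
 * ring 0 up to seq 40 (sync_seq[0] == 40), then 42 > 40 and a semaphore
 * is needed; had it already synced up to seq 50, the fence would be
 * covered by the earlier sync point and no semaphore is required.
 */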
/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i, r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	init_waitqueue_head(&ring->fence_drv.fence_queue);

	if (amdgpu_enable_scheduler) {
		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * The delayed workqueue cannot take
			 * MAX_SCHEDULE_TIMEOUT directly, so the scheduler
			 * will not use the delayed workqueue when
			 * MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   amdgpu_sched_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Test the fence sequence number if it is already signaled. If it isn't
 * signaled start fence processing. Returns True if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}
/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence to enable signaling on
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	amdgpu_fence_schedule_check(ring);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}
const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = NULL,
};
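/*
 * Illustrative sketch (not part of this file) of how the generic fence
 * API drives these ops: a waiter first polls .signaled
 * (amdgpu_fence_is_signaled) and, if the fence is still pending, the
 * core calls .enable_signaling to arm the waitqueue callback before
 * blocking in fence_default_wait:
 *
 *	struct fence *f = &afence->base;
 *
 *	if (!fence_is_signaled(f))
 *		fence_wait(f, true);	(interruptible wait)
 */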
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}
static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}