/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}
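/*
 * Note (added commentary): the else branch above appears to cover the
 * window before amdgpu_fence_driver_start_ring() has set up a CPU-visible
 * fence slot (drv->cpu_addr == NULL); falling back to the driver's own
 * last_seq keeps callers that extend the 32-bit value working meanwhile.
 */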
/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		   &ring->fence_drv.fence_queue.lock,
		   adev->fence_context + ring->idx,
		   (*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
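/*
 * Minimal usage sketch (illustrative, not part of the original file),
 * assuming the caller already holds the ring for command submission:
 *
 *	struct amdgpu_fence *fence;
 *	int r = amdgpu_fence_emit(ring, owner, &fence);
 *	if (r)
 *		return r;
 *
 * On success the fence command has been emitted to the ring and *fence
 * can later be waited on or polled.
 */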
/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * this function is called with fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode,
				       int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue,
				    &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, in which case the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop, new fences would
	 * need to be signaled continuously, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means the current process
	 * needs to be interrupted after amdgpu_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even safer, we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * possibility that we have set an older fence
			 * seq than the current real last seq as signaled
			 * anyway.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}
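/*
 * Worked example of the 32->64 bit extension above (illustrative numbers,
 * added commentary): suppose last_seq == 0x00000001fffffff0 and the ring
 * has since signaled up to 0x0000000200000005; amdgpu_fence_read() only
 * returns the low word 0x00000005. OR-ing in the high word of last_seq
 * yields 0x0000000100000005, which is smaller than last_seq, so the high
 * word of last_emitted (at least 0x00000002...) is used instead,
 * recovering the correct value 0x0000000200000005.
 */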
/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see whether a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				 lockup_work.work);
	ring = fence_drv->ring;

	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}
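/*
 * Note (added commentary): rather than having the IRQ handler signal each
 * fence individually, the fence arms a raw wait_queue_t entry with a
 * custom wakeup function. When the fence interrupt runs
 * amdgpu_fence_process() -> wake_up_all(), the waitqueue walk calls
 * amdgpu_fence_check_signaled() for every armed fence, which signals and
 * dequeues those whose sequence number has been reached. The fence_get()
 * above keeps the fence alive while it sits on the queue; the callback
 * drops that reference with fence_put() once it fires.
 */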
/**
 * amdgpu_fence_ring_wait_seq - wait for a specific sequence number on a ring
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * Return value:
 * 0: seq signaled, and gpu not hung
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	bool signaled = false;

	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));

	return signaled ? 0 : -EDEADLK;
}
/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}
/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
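/*
 * Note (added commentary): if last_seq is observed mid-update and briefly
 * exceeds sync_seq, the unsigned 64-bit subtraction above would wrap to a
 * huge value; the 0x10000000 clamp bounds the reported count in that case.
 */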
/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}
/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
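/*
 * Illustrative example of the bookkeeping above (added commentary):
 * sync_seq[i] on a ring records the highest fence seq of ring i that this
 * ring is known to be ordered after. Taking the element-wise max with the
 * source ring's table means dst_ring inherits every dependency the
 * fence's ring already had, so a later amdgpu_fence_need_sync() against
 * those rings can return false and skip the semaphore.
 */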
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d uses gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
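/*
 * Note (added commentary, inferred from the code above): non-UVD rings
 * keep their fence in the device write-back area, with a byte offset of
 * fence_offs * 4 since each write-back slot is one 32-bit word. The UVD
 * ring instead places its fence in UVD's own buffer, 8-byte aligned just
 * past the firmware image.
 */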
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i, r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			  amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	init_waitqueue_head(&ring->fence_drv.fence_queue);

	if (amdgpu_enable_scheduler) {
		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * A timeout of 0 means "wait forever", but the
			 * delayed workqueue cannot use that value directly,
			 * so the scheduler will not use a delayed workqueue
			 * if MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   amdgpu_sched_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}
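/*
 * Note (added commentary): writing the last *emitted* sequence number
 * (sync_seq[i]) into the fence slot makes every outstanding fence on the
 * ring read back as signaled, so the next amdgpu_fence_process() /
 * wake_up_all() pass releases all waiters even though the GPU never
 * completed the work.
 */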
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
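/*
 * Illustrative output sketch (made-up ring name and values, not captured
 * from real hardware) of what amdgpu_fence_info prints per ring:
 *
 *	--- ring 0 (gfx) ---
 *	Last signaled fence 0x0000000000000042
 *	Last emitted        0x0000000000000043
 *	Last sync to ring 1 0x0000000000000017
 */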
static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
{
	int idx;
	struct fence *fence;

	for (idx = 0; idx < count; ++idx) {
		fence = fences[idx];
		if (fence) {
			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
				return true;
		}
	}
	return false;
}
struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}
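/*
 * Note (added commentary): this is the standard callback-plus-
 * schedule_timeout() wait pattern. The callback only wake_up_process()es
 * the sleeping task; the task itself re-checks the fences after setting
 * its state, so a wakeup landing between the check and schedule_timeout()
 * is not lost (see amdgpu_fence_wait_any() below).
 */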
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;

	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
}
/**
 * Wait for any fence in the array to signal, with timeout
 *
 * @adev:  amdgpu device
 * @array: the array of amdgpu fence pointers
 * @count: the number of fences in the array
 * @intr:  whether the task sleeps interruptibly or not
 * @t:     timeout to wait
 *
 * Returns when any fence is signaled or the timeout expires.
 */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				  struct fence **array, uint32_t count,
				  bool intr, signed long t)
{
	struct amdgpu_wait_cb *cb;
	struct fence *fence;
	unsigned idx;

	BUG_ON(!array);

	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		t = -ENOMEM;
		goto err_exit;
	}

	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence) {
			cb[idx].task = current;
			if (fence_add_callback(fence,
					&cb[idx].base, amdgpu_fence_wait_cb)) {
				/* The fence is already signaled */
				goto fence_rm_cb;
			}
		}
	}

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled_any must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled_any(array, count))
			break;

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence && cb[idx].base.func)
			fence_remove_callback(fence, &cb[idx].base);
	}

err_exit:
	kfree(cb);

	return t;
}
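/*
 * Return-value summary (derived from the code above): a positive value is
 * the jiffies remaining when a fence signaled, 0 means the timeout
 * expired, -ERESTARTSYS means an interrupting signal arrived, and -ENOMEM
 * means the callback array could not be allocated.
 */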
const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
	.release = NULL,
};