/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
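/*
 * Minimal sketch of the scheme (illustrative only, not driver code): each
 * ring carries a monotonically increasing sequence number. The CPU records
 * the last emitted value in sync_seq[], the GPU writes the value of the
 * most recently completed fence back to memory, and "signaled" reduces to
 * an integer comparison. my_seq below stands for a hypothetical sequence
 * number handed out at emit time:
 *
 *	u64 emitted = ring->fence_drv.sync_seq[ring->idx];
 *	u64 done = atomic64_read(&ring->fence_drv.last_seq);
 *	bool signaled = (done >= my_seq);
 */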
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}
/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		   &ring->fence_drv.fence_queue.lock,
		   adev->fence_context + ring->idx,
		   (*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
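/*
 * Typical caller sketch (illustrative; the surrounding command submission
 * and the owner pointer come from the caller's context and are assumed
 * here):
 *
 *	struct amdgpu_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);
 *	if (r)
 *		return r;
 *	// fence->seq now marks this point in the ring
 */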
/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode,
				       int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue,
				    &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, in which case the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update the last_seq between the atomic
	 * read and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, there need to
	 * be continuously new fences signaled, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates the last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after amdgpu_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set the last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}
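/*
 * Worked example of the 32->64 bit extension above (illustrative numbers):
 * the hardware only ever writes the low 32 bits. With
 * last_seq = 0x00000001fffffff0 and amdgpu_fence_read() returning
 * 0x00000005, OR-ing in the high bits of last_seq gives
 * 0x0000000100000005, which is below last_seq, so the counter must have
 * wrapped; the high bits are taken from last_emitted instead. For
 * last_emitted = 0x0000000200000010 this yields 0x0000000200000005, which
 * sits in the valid (last_seq, last_emitted] window.
 */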
/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				 lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring)) {
		wake_up_all(&ring->fence_drv.fence_queue);
	} else if (amdgpu_ring_is_lockup(ring)) {
		/* good news we believe it's a lockup */
		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring->idx], ring->idx);

		/* remember that we need a reset */
		ring->adev->needs_reset = true;
		wake_up_all(&ring->fence_drv.fence_queue);
	}
	up_read(&ring->adev->exclusive_lock);
}
/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence to enable signaling on
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}
/**
 * amdgpu_fence_ring_wait_seq - wait for a seq on a specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * Return value:
 * 0: seq signaled, and gpu not hang
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_device *adev = ring->adev;
	bool signaled = false;

	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))
		   || adev->needs_reset));

	if (signaled)
		return 0;
	else
		return -EDEADLK;
}
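/*
 * Caller sketch (illustrative) for the return codes documented above; the
 * recovery path shown is hypothetical and depends on the caller's context:
 *
 *	r = amdgpu_fence_ring_wait_seq(ring, seq);
 *	if (r == -EDEADLK)
 *		return amdgpu_gpu_reset(adev);	// hang detected
 *	else if (r == -EINVAL)
 *		return r;	// seq was never emitted on this ring
 */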
/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}
/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid a 32-bit wrap-around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}
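/*
 * amdgpu_fence_need_sync() pairs with amdgpu_fence_note_sync() below.
 * Illustrative sketch, assuming the ring mutex is held and a semaphore
 * emit helper appropriate to the asic:
 *
 *	if (amdgpu_fence_need_sync(fence, dst_ring)) {
 *		// emit a semaphore wait on dst_ring here
 *		amdgpu_fence_note_sync(fence, dst_ring);
 *	}
 */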
/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i, r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			  amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	init_waitqueue_head(&ring->fence_drv.fence_queue);

	if (amdgpu_enable_scheduler) {
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   amdgpu_sched_hw_submission, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
{
	uint32_t idx;
	struct fence *fence;

	for (idx = 0; idx < count; ++idx) {
		fence = fences[idx];
		if (fence) {
			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
				return true;
		}
	}
	return false;
}
struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};
static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;

	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
}
/**
 * amdgpu_fence_wait_any - wait for a fence array with timeout
 *
 * @adev: amdgpu device
 * @array: the fence array with amdgpu fence pointers
 * @count: the number of fences in the array
 * @intr: when sleeping, set the current task interruptible or not
 * @t: timeout to wait
 *
 * Returns when any fence is signaled or when the timeout expires.
 */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				  struct fence **array, uint32_t count,
				  bool intr, signed long t)
{
	struct amdgpu_wait_cb *cb;
	struct fence *fence;
	uint32_t idx;

	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		t = -ENOMEM;
		goto err_free_cb;
	}

	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence) {
			cb[idx].task = current;
			if (fence_add_callback(fence,
					&cb[idx].base, amdgpu_fence_wait_cb)) {
				/* The fence is already signaled */
				goto fence_rm_cb;
			}
		}
	}

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled_any must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled_any(array, count))
			break;

		if (adev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence && cb[idx].base.func)
			fence_remove_callback(fence, &cb[idx].base);
	}

err_free_cb:
	kfree(cb);

	return t;
}
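/*
 * Usage sketch (illustrative): amdgpu_fence_default_wait() above is the
 * simplest caller, waiting on a single fence by passing a one-element
 * array:
 *
 *	struct fence *array[1] = { f };
 *
 *	t = amdgpu_fence_wait_any(adev, array, 1, intr, t);
 */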
const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
};