/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
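
/*
 * A rough sketch of the scheme: every ring carries a monotonically
 * increasing 32-bit sequence number, and a fence for sequence N counts
 * as signaled once the value the GPU has written back reaches N.  One
 * read therefore classifies all pending fences on a ring:
 *
 *	seq = amdgpu_fence_read(ring);	(e.g. 42)
 *	-> fences up to 42 have signaled, fence 43 is still pending
 *
 * (Wraparound of the 32-bit value is handled by the unsigned arithmetic
 * in amdgpu_fence_process() and amdgpu_fence_count_emitted() below.)
 */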

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
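
/*
 * Sketch of a hypothetical caller (the real ones live in the IB
 * submission path):
 *
 *	struct fence *fence;
 *	int r = amdgpu_fence_emit(ring, &fence);
 *	if (r)
 *		return r;
 *	... hand the fence to whoever must wait on this work ...
 *	fence_put(fence);
 *
 * The slot array holds its own reference (the fence_get() above), so
 * the fence stays valid until amdgpu_fence_process() signals it and
 * drops that reference.
 */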

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	while (last_seq != seq) {
		struct fence *fence, **ptr;

		ptr = &drv->fences[++last_seq & drv->num_fences_mask];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		rcu_assign_pointer(*ptr, NULL);

		if (!fence)
			continue;

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");

		fence_put(fence);
	}
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to the unsigned long timer argument
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
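
/*
 * Worked example for the arithmetic above, with last_seq about to wrap:
 * last_seq == 0xfffffffe and sync_seq == 0x00000001 give
 *
 *	emitted = 0x100000000 - 0xfffffffe + 0x1 = 3
 *
 * i.e. sequence numbers 0xffffffff, 0x0 and 0x1 are emitted but not yet
 * signaled.  The 2^32 bias keeps the intermediate value positive across
 * the 32-bit wrap before lower_32_bits() truncates the result.
 */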

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * A zero timeout means wait forever.  The delayed workqueue
		 * cannot take an infinite timeout directly, so the scheduler
		 * will not use the delayed workqueue if
		 * MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}
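
/*
 * Example of why num_hw_submission must be a power of two: with 256
 * entries, num_fences_mask is 0xff and the slot lookup in
 * amdgpu_fence_emit() becomes a cheap AND:
 *
 *	seq & 0xff	(seq 260 -> slot 4, exactly 260 % 256)
 *
 * With a non-power-of-two size, (seq & (size - 1)) would no longer
 * behave like seq % size and fences would land in the wrong slots.
 */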

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.initialized = false;
	}

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_drv.lock held and arms the
 * fallback timer if it is not already pending, so the fence still
 * signals even if the hardware interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
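
/*
 * Design note: ->enable_signaling only has to guarantee forward
 * progress.  The interrupt handler normally drives signaling via
 * amdgpu_fence_process(), and the fallback timer re-checks the ring
 * after AMDGPU_FENCE_JIFFIES_TIMEOUT in case an interrupt is lost,
 * so no per-fence callback needs to be registered here.
 */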

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}