/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
};
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;

	return 0;
}
void amdgpu_fence_slab_fini(void)
{
	kmem_cache_destroy(amdgpu_fence_slab);
}
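
/*
 * Usage note (an assumption, not visible in this file): the slab helpers
 * above are expected to be called once from the driver's module init and
 * exit paths.  Every fence allocated in amdgpu_fence_emit() below comes
 * out of this cache and is returned to it by the RCU callback in
 * amdgpu_fence_free().
 */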
/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
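
/*
 * Illustration (not driver code): with num_hw_submission = 4 the fences
 * array has 8 slots and num_fences_mask = 7, so sequence numbers map to
 * slots as seq & 7.  A slot is only reused once its previous occupant
 * has signaled; otherwise the fence_wait() above throttles the emitter
 * until the GPU catches up.
 */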
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		BUG_ON(!fence);

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		fence_put(fence);
	} while (last_seq != seq);
}
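
/*
 * Why the cmpxchg loop above: amdgpu_fence_process() can be entered
 * concurrently from the fence interrupt and from the fallback timer, so
 * last_seq is claimed with atomic_cmpxchg(); whichever context wins the
 * exchange becomes the single thread that signals the slots in
 * (last_seq, seq].
 */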
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: timer argument; pointer to the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}
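
/*
 * Note on fence_get_rcu() above: between rcu_dereference() and taking a
 * reference, the fence may be signaled, its slot cleared and the memory
 * queued for freeing; fence_get_rcu() only succeeds while the refcount
 * is still non-zero, so a failed get simply means the ring has already
 * drained and there is nothing left to wait for.
 */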
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
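
/*
 * Worked example (illustrative): with sync_seq = 5 emitted and
 * last_seq = 3 signaled, emitted = 0x100000000ull - 3 + 5 = 0x100000002,
 * whose lower 32 bits report 2 fences still in flight.  The 2^32 bias
 * keeps the subtraction correct when the 32-bit sequence counter wraps.
 */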
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * FIXME:
		 * Delayed workqueue cannot use it directly,
		 * so the scheduler will not use delayed workqueue if
		 * MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}
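
/*
 * Sizing note (illustrative): the fences array holds 2 * num_hw_submission
 * pointers, so an emitted fence normally never lands on a slot whose
 * occupant is still unsignaled, and num_fences_mask = 2 * num_hw_submission - 1
 * turns slot lookup into a single bitwise AND.  That trick is why
 * num_hw_submission must be a power of two, as checked above.
 */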
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.initialized = false;
	}
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
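
/*
 * Note: the fallback timer armed here guarantees that
 * amdgpu_fence_process() eventually runs even if a fence interrupt is
 * lost, bounded by AMDGPU_FENCE_JIFFIES_TIMEOUT per re-arm.
 */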
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};
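
/*
 * Usage sketch (illustrative only, not code from this file): a caller
 * that already holds a ring pointer would emit and wait on a fence
 * roughly like this:
 *
 *	struct fence *f;
 *	int r = amdgpu_fence_emit(ring, &f);
 *
 *	if (!r) {
 *		r = fence_wait(f, false);
 *		fence_put(f);
 *	}
 *
 * fence_wait() reaches fence_default_wait() through the .wait hook in
 * amdgpu_fence_ops above.
 */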
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}
/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}