drm/amdgpu: add core driver (v4)
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
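
/*
 * Typical usage of the interfaces below (a sketch only; "ring" and "owner"
 * are placeholders for whatever the caller already holds, not code taken
 * from this file):
 *
 *        struct amdgpu_fence *fence;
 *        int r;
 *
 *        r = amdgpu_fence_emit(ring, owner, &fence);
 *        if (!r) {
 *                r = amdgpu_fence_wait(fence, false);
 *                amdgpu_fence_unref(&fence);
 *        }
 */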

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = lower_32_bits(atomic64_read(&drv->last_seq));

        return seq;
}
/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
        /*
         * Do not reset the timer here with mod_delayed_work,
         * this can livelock in an interaction with TTM delayed destroy.
         */
        queue_delayed_work(system_power_efficient_wq,
                           &ring->fence_drv.lockup_work,
                           AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;

        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
        (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
        (*fence)->ring = ring;
        (*fence)->owner = owner;
        fence_init(&(*fence)->base, &amdgpu_fence_ops,
                   &adev->fence_queue.lock, adev->fence_context + ring->idx,
                   (*fence)->seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false);
        trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
        return 0;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
        struct amdgpu_fence *fence;
        struct amdgpu_device *adev;
        u64 seq;
        int ret;

        fence = container_of(wait, struct amdgpu_fence, fence_wake);
        adev = fence->ring->adev;

        /*
         * We cannot use amdgpu_fence_process here because we're already
         * in the waitqueue, in a call from wake_up_all.
         */
        seq = atomic64_read(&fence->ring->fence_drv.last_seq);
        if (seq >= fence->seq) {
                ret = fence_signal_locked(&fence->base);
                if (!ret)
                        FENCE_TRACE(&fence->base, "signaled from irq context\n");
                else
                        FENCE_TRACE(&fence->base, "was already signaled\n");

                amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
                               fence->ring->fence_drv.irq_type);
                __remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
                fence_put(&fence->base);
        } else
                FENCE_TRACE(&fence->base, "pending\n");
        return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring, in which case the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that other
         * process needs to update last_seq between the atomic read and
         * xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop there needs to be
         * a continuous stream of newly signaled fences, i.e. amdgpu_fence_read
         * needs to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last_seq must be higher than
         * the seq value we just read, which means the current process
         * needs to be interrupted after amdgpu_fence_read and before the
         * atomic xchg.
         *
         * To be even safer we count the number of times we loop and
         * bail out after 10 loops, accepting the fact that we might
         * have temporarily set last_seq not to the true last
         * seq but to an older one.
         */
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq[ring->idx];
                seq = amdgpu_fence_read(ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times, leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (seq < last_emitted)
                amdgpu_fence_schedule_check(ring);

        return wake;
}

/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
        struct amdgpu_fence_driver *fence_drv;
        struct amdgpu_ring *ring;

        fence_drv = container_of(work, struct amdgpu_fence_driver,
                                 lockup_work.work);
        ring = fence_drv->ring;

        if (!down_read_trylock(&ring->adev->exclusive_lock)) {
                /* just reschedule the check if a reset is going on */
                amdgpu_fence_schedule_check(ring);
                return;
        }

        if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
                fence_drv->delayed_irq = false;
                amdgpu_irq_update(ring->adev, fence_drv->irq_src,
                                  fence_drv->irq_type);
        }

        if (amdgpu_fence_activity(ring))
                wake_up_all(&ring->adev->fence_queue);
        else if (amdgpu_ring_is_lockup(ring)) {
                /* good news we believe it's a lockup */
                dev_warn(ring->adev->dev, "GPU lockup (current fence id "
                         "0x%016llx last fence id 0x%016llx on ring %d)\n",
                         (uint64_t)atomic64_read(&fence_drv->last_seq),
                         fence_drv->sync_seq[ring->idx], ring->idx);

                /* remember that we need a reset */
                ring->adev->needs_reset = true;
                wake_up_all(&ring->adev->fence_queue);
        }
        up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that other
         * process needs to update last_seq between the atomic read and
         * xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop there needs to be
         * a continuous stream of newly signaled fences, i.e. amdgpu_fence_read
         * needs to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last_seq must be higher than
         * the seq value we just read, which means the current process
         * needs to be interrupted after amdgpu_fence_read and before the
         * atomic xchg.
         *
         * To be even safer we count the number of times we loop and
         * bail out after 10 loops, accepting the fact that we might
         * have temporarily set last_seq not to the true last
         * seq but to an older one.
         */
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq[ring->idx];
                seq = amdgpu_fence_read(ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times, leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (wake)
                wake_up_all(&ring->adev->fence_queue);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        /* poll new last sequence at least once */
        amdgpu_fence_process(ring);
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        return false;
}

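/**
 * amdgpu_fence_is_signaled - fence_ops signaled callback
 *
 * @f: fence to check
 *
 * Checks whether the fence has already signaled, first by looking at the
 * last signaled sequence number and then, if the exclusive_lock can be
 * taken, by processing the ring once more.
 * Returns true if the fence has signaled.
 */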
static bool amdgpu_fence_is_signaled(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;
        struct amdgpu_device *adev = ring->adev;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        if (down_read_trylock(&adev->exclusive_lock)) {
                amdgpu_fence_process(ring);
                up_read(&adev->exclusive_lock);

                if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                        return true;
        }
        return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;
        struct amdgpu_device *adev = ring->adev;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return false;

        if (down_read_trylock(&adev->exclusive_lock)) {
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                if (amdgpu_fence_activity(ring))
                        wake_up_all_locked(&adev->fence_queue);

                /* did fence get signaled after we enabled the sw irq? */
                if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
                        up_read(&adev->exclusive_lock);
                        return false;
                }

                up_read(&adev->exclusive_lock);
        } else {
                /* we're probably in a lockup, let's not fiddle too much */
                if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
                                           ring->fence_drv.irq_type))
                        ring->fence_drv.delayed_irq = true;
                amdgpu_fence_schedule_check(ring);
        }

        fence->fence_wake.flags = 0;
        fence->fence_wake.private = NULL;
        fence->fence_wake.func = amdgpu_fence_check_signaled;
        __add_wait_queue(&adev->fence_queue, &fence->fence_wake);
        fence_get(f);
        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
        return true;
}

/**
 * amdgpu_fence_signaled - check if a fence has signaled
 *
 * @fence: amdgpu fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
{
        if (!fence)
                return true;

        if (fence->seq == AMDGPU_FENCE_SIGNALED_SEQ)
                return true;

        if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
                fence->seq = AMDGPU_FENCE_SIGNALED_SEQ;
                if (!fence_signal(&fence->base))
                        FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
                return true;
        }

        return false;
}

/**
 * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @adev: amdgpu device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for amdgpu_fence_wait_seq.
 */
static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
{
        unsigned i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (!adev->rings[i] || !seq[i])
                        continue;

                if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
                        return true;
        }

        return false;
}

/**
 * amdgpu_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @adev: amdgpu device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for amdgpu_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 if
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
                                   bool intr, long timeout)
{
        uint64_t last_seq[AMDGPU_MAX_RINGS];
        bool signaled;
        int i, r;

        while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {

                /* Save current sequence values, used to check for GPU lockups */
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];

                        if (!ring || !target_seq[i])
                                continue;

                        last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
                        trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
                }

                if (intr) {
                        r = wait_event_interruptible_timeout(adev->fence_queue, (
                                (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
                                 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
                } else {
                        r = wait_event_timeout(adev->fence_queue, (
                                (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
                                 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
                }

                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];

                        if (!ring || !target_seq[i])
                                continue;

                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
                        trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
                }

                if (unlikely(r < 0))
                        return r;

                if (unlikely(!signaled)) {

                        if (adev->needs_reset)
                                return -EDEADLK;

                        /* we were interrupted for some reason and fence
                         * isn't signaled yet, resume waiting */
                        if (r)
                                continue;

                        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                                struct amdgpu_ring *ring = adev->rings[i];

                                if (!ring || !target_seq[i])
                                        continue;

                                if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
                                        break;
                        }

                        if (i != AMDGPU_MAX_RINGS)
                                continue;

                        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                                if (!adev->rings[i] || !target_seq[i])
                                        continue;

                                if (amdgpu_ring_is_lockup(adev->rings[i]))
                                        break;
                        }

                        if (i < AMDGPU_MAX_RINGS) {
                                /* good news we believe it's a lockup */
                                dev_warn(adev->dev, "GPU lockup (waiting for "
                                         "0x%016llx last fence id 0x%016llx on"
                                         " ring %d)\n",
                                         target_seq[i], last_seq[i], i);

                                /* remember that we need a reset */
                                adev->needs_reset = true;
                                wake_up_all(&adev->fence_queue);
                                return -EDEADLK;
                        }

                        if (timeout < MAX_SCHEDULE_TIMEOUT) {
                                timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
                                if (timeout <= 0) {
                                        return 0;
                                }
                        }
                }
        }
        return timeout;
}

/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
        uint64_t seq[AMDGPU_MAX_RINGS] = {};
        long r;

        seq[fence->ring->idx] = fence->seq;
        if (seq[fence->ring->idx] == AMDGPU_FENCE_SIGNALED_SEQ)
                return 0;

        r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
        if (r < 0) {
                return r;
        }

        fence->seq = AMDGPU_FENCE_SIGNALED_SEQ;
        r = fence_signal(&fence->base);
        if (!r)
                FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
        return 0;
}

/**
 * amdgpu_fence_wait_any - wait for a fence to signal on any ring
 *
 * @adev: amdgpu device pointer
 * @fences: amdgpu fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). Fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
                          struct amdgpu_fence **fences,
                          bool intr)
{
        uint64_t seq[AMDGPU_MAX_RINGS];
        unsigned i, num_rings = 0;
        long r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                seq[i] = 0;

                if (!fences[i]) {
                        continue;
                }

                seq[i] = fences[i]->seq;
                ++num_rings;

                /* test if something was already signaled */
                if (seq[i] == AMDGPU_FENCE_SIGNALED_SEQ)
                        return 0;
        }

        /* nothing to wait for ? */
        if (num_rings == 0)
                return -ENOENT;

        r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
        if (r < 0) {
                return r;
        }
        return 0;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait on for the next available fence
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
        uint64_t seq[AMDGPU_MAX_RINGS] = {};
        long r;

        seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
        if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
                /* nothing to wait for, last_seq is
                   already the last emitted fence */
                return -ENOENT;
        }
        r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
        return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for all emitted fences
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t seq[AMDGPU_MAX_RINGS] = {};
        long r;

        seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
        if (!seq[ring->idx])
                return 0;

        r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0) {
                if (r == -EDEADLK)
                        return -EDEADLK;

                dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
                        ring->idx, r);
        }
        return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
        fence_get(&fence->base);
        return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
        struct amdgpu_fence *tmp = *fence;

        *fence = NULL;
        if (tmp)
                fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = ring->fence_drv.sync_seq[ring->idx]
                - atomic64_read(&ring->fence_drv.last_seq);
        /* to avoid 32-bit wraparound */
        if (emitted > 0x10000000)
                emitted = 0x10000000;

        return (unsigned)emitted;
}

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
                            struct amdgpu_ring *dst_ring)
{
        struct amdgpu_fence_driver *fdrv;

        if (!fence)
                return false;

        if (fence->ring == dst_ring)
                return false;

        /* we are protected by the ring mutex */
        fdrv = &dst_ring->fence_drv;
        if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
                return false;

        return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
                            struct amdgpu_ring *dst_ring)
{
        struct amdgpu_fence_driver *dst, *src;
        unsigned i;

        if (!fence)
                return;

        if (fence->ring == dst_ring)
                return;

        /* we are protected by the ring mutex */
        src = &fence->ring->fence_drv;
        dst = &dst_ring->fence_drv;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (i == dst_ring->idx)
                        continue;

                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring != &adev->uvd.ring) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
        ring->fence_drv.initialized = true;
        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                 "cpu addr 0x%p\n", ring->idx,
                 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        int i;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ring->fence_drv.sync_seq[i] = 0;

        atomic64_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
                          amdgpu_fence_check_lockup);
        ring->fence_drv.ring = ring;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        init_waitqueue_head(&adev->fence_queue);
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        int i, r;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(adev);
                }
                wake_up_all(&adev->fence_queue);
                ring->fence_drv.initialized = false;
        }
        mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
        }
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
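/**
 * amdgpu_debugfs_fence_info - dump fence state for each ring
 *
 * Debugfs callback that prints the last signaled and last emitted
 * sequence numbers for every initialized ring, plus the recorded
 * sync points against the other rings.
 */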
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted 0x%016llx\n",
                           ring->fence_drv.sync_seq[i]);

                for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
                        struct amdgpu_ring *other = adev->rings[j];
                        if (i != j && other && other->fence_drv.initialized)
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, ring->fence_drv.sync_seq[j]);
                }
        }
        return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

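/**
 * amdgpu_debugfs_fence_init - register the fence debugfs file
 *
 * @adev: amdgpu device pointer
 *
 * Registers the amdgpu_fence_info file when debugfs support is built in;
 * otherwise does nothing. Returns 0 on success.
 */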
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
        return 0;
#endif
}

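/*
 * Common fence framework (struct fence_ops) backends. These implement the
 * callbacks wired up in amdgpu_fence_ops at the bottom of this file.
 */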
static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

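/* Check FENCE_FLAG_SIGNALED_BIT without triggering any fence processing. */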
static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

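/*
 * Helper for the default wait: a fence callback that simply wakes the task
 * that armed it, so the waiter can re-check the fence state.
 */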
struct amdgpu_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
        struct amdgpu_wait_cb *wait =
                container_of(cb, struct amdgpu_wait_cb, base);
        wake_up_process(wait->task);
}

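/**
 * amdgpu_fence_default_wait - fence_ops wait callback
 *
 * @f: fence to wait on
 * @intr: use interruptible sleep
 * @t: timeout in jiffies
 *
 * Sleeps until the fence signals, the timeout expires, a signal is pending
 * (when @intr is true), or a GPU reset is needed. Returns the remaining
 * time, 0 on timeout, or a negative error code.
 */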
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
                                             signed long t)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_device *adev = fence->ring->adev;
        struct amdgpu_wait_cb cb;

        cb.task = current;

        if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
                return t;

        while (t > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                /*
                 * amdgpu_test_signaled must be called after
                 * set_current_state to prevent a race with wake_up_process
                 */
                if (amdgpu_test_signaled(fence))
                        break;

                if (adev->needs_reset) {
                        t = -EDEADLK;
                        break;
                }

                t = schedule_timeout(t);

                if (t > 0 && intr && signal_pending(current))
                        t = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);
        fence_remove_callback(f, &cb.base);

        return t;
}

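/* fence_ops table hooking amdgpu fences into the common fence framework */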
const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .signaled = amdgpu_fence_is_signaled,
        .wait = amdgpu_fence_default_wait,
        .release = NULL,
};