/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or a memory location depends on
 * the asic and on whether writeback is enabled.
 */
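
/*
 * Typical fence life cycle (an illustrative sketch only: error handling,
 * buffer association and the locking around ring emission are elided, and
 * the ring index is just an example):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */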

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there need to
	 * be continuously new fences signaled, i.e. radeon_fence_read
	 * needs to return a different value each time for both the
	 * currently polling process and the other process that xchgs
	 * last_seq between the atomic read and xchg of the current
	 * process. And the value the other process sets as last seq must
	 * be higher than the seq value we just read. Which means the
	 * current process needs to be interrupted after radeon_fence_read
	 * and before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake)
		wake_up_all(&rdev->fence_queue);
}
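
/*
 * Worked example of the wrap handling above (illustrative numbers):
 * with last_seq == 0x00000001fffffff0 the hardware may report the 32-bit
 * value 0x00000005. Combined with the upper bits of last_seq this gives
 * 0x0000000100000005, which is < last_seq, so a wrap is assumed and the
 * upper bits are taken from last_emitted instead, yielding e.g.
 * 0x0000000200000005. The seq <= last_seq and seq > last_emitted checks
 * then discard anything implausible.
 */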

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any sequence number has signaled (current value is
 * >= requested value) or false if none has. Helper function for
 * radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and the fence
			 * isn't signaled yet; resume waiting */
			if (r)
				continue;

			/* did any ring make progress since we saved last_seq? */
			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			if (i != RADEON_NUM_RINGS)
				continue;

			/* no progress anywhere; ask each ring if it is locked up */
			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}
		}
	}
	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	seq[fence->ring] = fence->seq;
	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
		return 0;

	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
	if (r)
		return r;

	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;

		/* test if something was already signaled */
		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
			return 0;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
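
/*
 * Example caller pattern (an illustrative sketch; gfx_fence is a
 * hypothetical fence, and the suballocator builds the array from its
 * own per-ring bookkeeping):
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	r = radeon_fence_wait_any(rdev, fences, true);
 */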

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, false);
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq(rdev, seq, false);
	if (r) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
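
/*
 * E.g. with sync_seq[ring] == 105 and last_seq == 100 (illustrative
 * numbers) five fences are still outstanding; the 0x10000000 clamp only
 * matters in the pathological case where the two counters are that far
 * apart.
 */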

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
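
/*
 * Together the two helpers above implement the inter-ring sync pattern
 * (an illustrative sketch; the actual semaphore emission lives in the
 * radeon semaphore code and is not shown here):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */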

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}