/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

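/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory if the write-back buffer is enabled,
 * otherwise to the ring's fence scratch register.
 */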
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

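/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory if the write-back buffer is enabled,
 * otherwise from the ring's fence scratch register. Returns the value
 * that was read.
 */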
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

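/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Allocates a fence object, assigns it the next sequence number for the
 * ring and emits it on the ring. Returns 0 on success, -ENOMEM on failure.
 */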
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}

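/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value on the ring and updates the driver's
 * last signaled sequence number, waking up any waiters when it advances.
 */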
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and another
	 * process needs to update the last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchgs the last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

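/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object once its reference count drops to zero.
 */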
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

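/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Returns true if the sequence number has already signaled on the
 * requested ring, polling the current hardware value once if necessary.
 */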
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

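/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Checks if the requested fence has signaled, caching the result in the
 * fence object. Returns true if it has, false otherwise; a NULL fence
 * counts as signaled.
 */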
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

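/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring lock should be taken for lockup handling
 *
 * Waits for the requested sequence number to signal on the given ring,
 * checking for a GPU lockup whenever the wait times out. Returns 0 when
 * the sequence number has signaled, -EBUSY if the ring is not ready,
 * -EDEADLK on a detected lockup, or a negative error code if the wait
 * was interrupted.
 */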
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

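/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Waits for the requested fence to signal. Returns 0 if the fence has
 * passed, or an error for all other cases.
 */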
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

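/**
 * radeon_fence_any_seq_signaled - check if any sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: array of sequence numbers, one per ring (0 means skip that ring)
 *
 * Returns true if any of the requested sequence numbers has signaled.
 */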
bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

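/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: array of sequence numbers, one per ring (0 means skip)
 * @intr: use interruptible sleep
 *
 * Waits until any of the requested sequence numbers signals, using the
 * lowest actively-waited ring for lockup detection. Returns 0 once a
 * sequence number has signaled, -EDEADLK on a detected lockup, or a
 * negative error code if the wait was interrupted.
 */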
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return 0;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

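/**
 * radeon_fence_wait_any - wait for any fence in the array to signal
 *
 * @rdev: radeon device pointer
 * @fences: array of fence pointers, one per ring (entries may be NULL)
 * @intr: use interruptible sleep
 *
 * Waits until any fence in the array signals; returns 0 immediately if
 * one of them has already signaled.
 */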
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

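/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Waits for the next fence on the requested ring to signal. Caller must
 * hold the ring lock. Returns 0 on success, or -ENOENT if no fence is
 * outstanding.
 */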
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

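/**
 * radeon_fence_wait_empty_locked - wait for all fences on a ring to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fences are associated with
 *
 * Waits until all fences emitted on the requested ring have signaled,
 * resetting the GPU and retrying if a lockup is detected. Caller must
 * hold the ring lock.
 */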
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while (1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
		}
		return;
	}
}

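/**
 * radeon_fence_ref - take a reference on a fence
 *
 * @fence: radeon fence object
 *
 * Increments the fence's reference count and returns the fence.
 */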
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

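/**
 * radeon_fence_unref - drop a reference on a fence
 *
 * @fence: pointer to the fence pointer, cleared before the reference
 * is dropped
 *
 * Decrements the fence's reference count, freeing the fence when the
 * count reaches zero.
 */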
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

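/**
 * radeon_fence_count_emitted - count the number of unsignaled fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fences are associated with
 *
 * Returns the number of emitted but not yet signaled fences on the
 * requested ring, capped to avoid 32-bit wrap-around.
 */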
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

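/**
 * radeon_fence_need_sync - check if a semaphore sync is needed
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Returns true if the destination ring still needs to synchronize with
 * the fence's ring before it can consume the fence's results.
 */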
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

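/**
 * radeon_fence_note_sync - record that a sync to a fence has been emitted
 *
 * @fence: radeon fence object
 * @dst_ring: which ring the sync was emitted on
 *
 * Updates the destination ring's sync sequence numbers so that redundant
 * semaphore waits against the same fences can be skipped later.
 */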
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

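/**
 * radeon_fence_driver_start_ring - make the fence driver ready for a ring
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Sets up the fence location (either in the write-back buffer or a
 * scratch register) and writes back the current sequence number for the
 * requested ring. Returns 0 on success, or an error if no scratch
 * register could be allocated.
 */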
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

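/**
 * radeon_fence_driver_init_ring - init the fence driver for a ring
 *
 * @rdev: radeon device pointer
 * @ring: ring index to init the fence driver on
 *
 * Initializes the fence driver state for the requested ring; the ring
 * is not ready for use until radeon_fence_driver_start_ring() is called.
 */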
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

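/**
 * radeon_fence_driver_init - init the fence driver for all rings
 *
 * @rdev: radeon device pointer
 *
 * Initializes the fence queue and the per-ring fence driver state, and
 * registers the fence debugfs file. Always returns 0.
 */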
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

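/**
 * radeon_fence_driver_fini - tear down the fence driver for all rings
 *
 * @rdev: radeon device pointer
 *
 * Waits for all outstanding fences to signal and frees the fence driver
 * resources for each initialized ring.
 */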
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}