/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

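/* Breadcrumbs track the seqno values the GPU writes as each request
 * completes. Rather than have every waiting client poll the hardware and
 * be woken by every user interrupt (a thundering herd), we keep an rbtree
 * of waiters ordered by seqno and elect only the oldest waiter as the
 * interrupt bottom-half. That single task performs the coherent seqno
 * check on each interrupt, wakes its successors as their requests
 * complete, and hands the bottom-half role to the next waiter when it is
 * done. A dedicated kthread, the signaler, waits on behalf of
 * dma-buf/fence signaling.
 */
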
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_enabled)
                return;

        if (time_before(jiffies, b->timeout)) {
                mod_timer(&b->hangcheck, b->timeout);
                return;
        }

        DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
        mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

        /* Ensure that even if the GPU hangs, we get woken up.
         *
         * However, note that if no one is waiting, we never notice
         * a GPU hang. Eventually, we will have to wait for a resource
         * held by the GPU and so trigger a hangcheck. In the most
         * pathological case, this will be upon memory starvation! To
         * prevent this, we also queue the hangcheck from the retire
         * worker.
         */
        i915_queue_hangcheck(engine->i915);
}

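/* How long to wait for a user interrupt before intel_breadcrumbs_hangcheck()
 * suspects the missed-interrupt syndrome; rounded up to a whole second
 * (round_jiffies_up) so the expiry coalesces with other timer wakeups.
 */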
static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

        /*
         * The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome). Here the timer will wake up
         * every jiffy in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        engine->breadcrumbs.irq_posted = true;

        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);

        engine->breadcrumbs.irq_posted = false;
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;

        assert_spin_locked(&b->lock);
        if (b->rpm_wakelock)
                return;

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. For completeness,
         * record an rpm reference for ourselves to cover the
         * interrupt we unmask.
         */
        intel_runtime_pm_get_noresume(i915);
        b->rpm_wakelock = true;

        /* No interrupts? Kick the waiter every jiffy! */
        if (intel_irqs_enabled(i915)) {
                if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        irq_enable(engine);
                b->irq_enabled = true;
        }

        if (!b->irq_enabled ||
            test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
                mod_timer(&b->fake_irq, jiffies + 1);
        } else {
                /* Ensure we never sleep indefinitely */
                GEM_BUG_ON(!time_after(b->timeout, jiffies));
                mod_timer(&b->hangcheck, b->timeout);
        }
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        assert_spin_locked(&b->lock);
        if (!b->rpm_wakelock)
                return;

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false;
        }

        intel_runtime_pm_put(engine->i915);
        b->rpm_wakelock = false;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return container_of(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        assert_spin_locked(&b->lock);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, prune completed branches: since we hold
         * the spinlock, we know that the first_waiter must be delayed, and
         * we can reduce some of the sequential wake up latency if we take
         * action ourselves and wake up the completed tasks in parallel.
         * Also, by removing stale elements in the tree, we may be able to
         * reduce the ping-pong between the old bottom-half and ourselves as
         * first-waiter.
         */
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try and get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
        GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

        if (completed) {
                struct rb_node *next = rb_next(completed);

                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                         * and so need the next bottom-half to wake up.
                         *
                         * Also, as we enable the IRQ, we may miss the
                         * interrupt for that seqno, so we have to wake up
                         * the next bottom-half in order to do a coherent check
                         * in case the seqno passed.
                         */
                        __intel_breadcrumbs_enable_irq(b);
                        if (READ_ONCE(b->irq_posted))
                                wake_up_process(to_wait(next)->tsk);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->timeout = wait_timeout();
                b->first_wait = wait;
                rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

        return first;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;

        spin_lock(&b->lock);
        first = __intel_engine_add_wait(engine, wait);
        spin_unlock(&b->lock);

        return first;
}
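
/* Illustrative sketch of the waiter side (assumed usage, modelled on
 * i915_wait_request(); intel_wait_init() is the helper assumed to fill in
 * wait->tsk = current and wait->seqno):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *				      wait.seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */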

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}

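/* The signaler kthread serves every client, so when it happens to be the
 * departing bottom-half, treat it as the highest possible priority
 * (INT_MIN): chain_wakeup() is then never satisfied, and the signaler is
 * never delayed by waking a chain of completed waiters itself.
 */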
static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node))
                return;

        spin_lock(&b->lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out_unlock;

        if (b->first_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in
                         * parallel rather than sequentially, which should
                         * reduce the overall latency in waking all the
                         * completed clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                if (next) {
                        /* In our haste, we may have completed the first waiter
                         * before we enabled the interrupt. Do so now as we
                         * have a second waiter for a future seqno. Afterwards,
                         * we have to wake up that waiter in case we missed
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
                        wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
                        rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);

out_unlock:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
        spin_unlock(&b->lock);
}

static bool signal_complete(struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half, it may have already
         * signalled that this wait is complete.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return true;

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return container_of(rb, struct drm_i915_gem_request, signaling.node);
}

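/* SCHED_FIFO priority 1 is the lowest realtime priority, yet it still
 * preempts every normal (CFS) task, keeping signalling latency low without
 * contending with the rest of the realtime class.
 */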
static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaler. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
                request = READ_ONCE(b->first_signal);
                if (signal_complete(request)) {
                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);
                        fence_signal(&request->fence);

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        spin_lock(&b->lock);
                        if (request == b->first_signal) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                b->first_signal = rb ? to_signaler(rb) : NULL;
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        spin_unlock(&b->lock);

                        i915_gem_request_put(request);
                } else {
                        if (kthread_should_stop())
                                break;

                        schedule();
                }
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;

        /* locked by fence_enable_sw_signaling() */
        assert_spin_locked(&request->lock);

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.seqno = request->fence.seqno;
        i915_gem_request_get(request);

        spin_lock(&b->lock);

        /* First add ourselves into the list of waiters, but register the
         * signaler thread as our bottom-half. As per usual, only the oldest
         * waiter (not just the signaler) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

        /* Now insert ourselves into the retirement ordered list of signals
         * on this engine. We track the oldest seqno as that will be the
         * first signal to complete.
         */
        parent = NULL;
        first = true;
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
                if (i915_seqno_passed(request->fence.seqno,
                                      to_signaler(parent)->fence.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
                smp_store_mb(b->first_signal, request);

        spin_unlock(&b->lock);

        if (wakeup)
                wake_up_process(b->signaler);
}
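
/* Illustrative call path (assumed, following the locking comment above):
 *
 *	fence_enable_sw_signaling(&request->fence)
 *	  -> fence->ops->enable_signaling(fence)
 *	    -> intel_engine_enable_signaling(request)
 *
 * i.e. the dma-buf/fence core takes request->lock and then asks us to turn
 * on interrupt-driven signaling for this request.
 */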

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
        setup_timer(&b->hangcheck,
                    intel_breadcrumbs_hangcheck,
                    (unsigned long)engine);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
}

unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        /* To avoid the task_struct disappearing beneath us as we wake up
         * the process, we must first inspect the task_struct->state under the
         * RCU lock, i.e. as we call wake_up_process() we must be holding the
         * rcu_read_lock().
         */
        for_each_engine(engine, i915)
                if (unlikely(intel_engine_wakeup(engine)))
                        mask |= intel_engine_flag(engine);

        return mask;
}

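/* The counterpart of intel_kick_waiters() for the signaling bottom halves:
 * wake the signaler thread on every engine that still has signals pending,
 * so that each redoes its coherent seqno check.
 */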
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        for_each_engine(engine, i915) {
                if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
                        wake_up_process(engine->breadcrumbs.signaler);
                        mask |= intel_engine_flag(engine);
                }
        }

        return mask;
}