/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
40 static const struct fence_ops nouveau_fence_ops_uevent
;
41 static const struct fence_ops nouveau_fence_ops_legacy
;
43 static inline struct nouveau_fence
*
44 from_fence(struct fence
*fence
)
46 return container_of(fence
, struct nouveau_fence
, base
);
49 static inline struct nouveau_fence_chan
*
50 nouveau_fctx(struct nouveau_fence
*fence
)
52 return container_of(fence
->base
.lock
, struct nouveau_fence_chan
, lock
);
56 nouveau_fence_signal(struct nouveau_fence
*fence
)
58 fence_signal_locked(&fence
->base
);
59 list_del(&fence
->head
);
61 if (test_bit(FENCE_FLAG_USER_BITS
, &fence
->base
.flags
)) {
62 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
64 if (!--fctx
->notify_ref
)
65 nvif_notify_put(&fctx
->notify
);
68 fence_put(&fence
->base
);
71 static struct nouveau_fence
*
72 nouveau_local_fence(struct fence
*fence
, struct nouveau_drm
*drm
) {
73 struct nouveau_fence_priv
*priv
= (void*)drm
->fence
;
75 if (fence
->ops
!= &nouveau_fence_ops_legacy
&&
76 fence
->ops
!= &nouveau_fence_ops_uevent
)
79 if (fence
->context
< priv
->context_base
||
80 fence
->context
>= priv
->context_base
+ priv
->contexts
)
83 return from_fence(fence
);
87 nouveau_fence_context_del(struct nouveau_fence_chan
*fctx
)
89 struct nouveau_fence
*fence
;
91 nvif_notify_fini(&fctx
->notify
);
93 spin_lock_irq(&fctx
->lock
);
94 while (!list_empty(&fctx
->pending
)) {
95 fence
= list_entry(fctx
->pending
.next
, typeof(*fence
), head
);
97 nouveau_fence_signal(fence
);
98 fence
->channel
= NULL
;
100 spin_unlock_irq(&fctx
->lock
);
104 nouveau_fence_update(struct nouveau_channel
*chan
, struct nouveau_fence_chan
*fctx
)
106 struct nouveau_fence
*fence
;
108 u32 seq
= fctx
->read(chan
);
110 while (!list_empty(&fctx
->pending
)) {
111 fence
= list_entry(fctx
->pending
.next
, typeof(*fence
), head
);
113 if ((int)(seq
- fence
->base
.seqno
) < 0)
116 nouveau_fence_signal(fence
);
121 nouveau_fence_wait_uevent_handler(struct nvif_notify
*notify
)
123 struct nouveau_fence_chan
*fctx
=
124 container_of(notify
, typeof(*fctx
), notify
);
127 spin_lock_irqsave(&fctx
->lock
, flags
);
128 if (!list_empty(&fctx
->pending
)) {
129 struct nouveau_fence
*fence
;
131 fence
= list_entry(fctx
->pending
.next
, typeof(*fence
), head
);
132 nouveau_fence_update(fence
->channel
, fctx
);
134 spin_unlock_irqrestore(&fctx
->lock
, flags
);
136 /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
137 return NVIF_NOTIFY_KEEP
;
141 nouveau_fence_context_new(struct nouveau_channel
*chan
, struct nouveau_fence_chan
*fctx
)
143 struct nouveau_fence_priv
*priv
= (void*)chan
->drm
->fence
;
146 INIT_LIST_HEAD(&fctx
->flip
);
147 INIT_LIST_HEAD(&fctx
->pending
);
148 spin_lock_init(&fctx
->lock
);
149 fctx
->context
= priv
->context_base
+ chan
->chid
;
154 ret
= nvif_notify_init(chan
->object
, NULL
,
155 nouveau_fence_wait_uevent_handler
, false,
156 G82_CHANNEL_DMA_V0_NTFY_UEVENT
,
157 &(struct nvif_notify_uevent_req
) { },
158 sizeof(struct nvif_notify_uevent_req
),
159 sizeof(struct nvif_notify_uevent_rep
),
165 struct nouveau_fence_work
{
166 struct work_struct work
;
168 void (*func
)(void *);
173 nouveau_fence_work_handler(struct work_struct
*kwork
)
175 struct nouveau_fence_work
*work
= container_of(kwork
, typeof(*work
), work
);
176 work
->func(work
->data
);
180 static void nouveau_fence_work_cb(struct fence
*fence
, struct fence_cb
*cb
)
182 struct nouveau_fence_work
*work
= container_of(cb
, typeof(*work
), cb
);
184 schedule_work(&work
->work
);
188 nouveau_fence_work(struct fence
*fence
,
189 void (*func
)(void *), void *data
)
191 struct nouveau_fence_work
*work
;
193 if (fence_is_signaled(fence
))
196 work
= kmalloc(sizeof(*work
), GFP_KERNEL
);
198 WARN_ON(nouveau_fence_wait((struct nouveau_fence
*)fence
,
203 INIT_WORK(&work
->work
, nouveau_fence_work_handler
);
207 if (fence_add_callback(fence
, &work
->cb
, nouveau_fence_work_cb
) < 0)
218 nouveau_fence_emit(struct nouveau_fence
*fence
, struct nouveau_channel
*chan
)
220 struct nouveau_fence_chan
*fctx
= chan
->fence
;
221 struct nouveau_fence_priv
*priv
= (void*)chan
->drm
->fence
;
224 fence
->channel
= chan
;
225 fence
->timeout
= jiffies
+ (15 * HZ
);
228 fence_init(&fence
->base
, &nouveau_fence_ops_uevent
,
230 priv
->context_base
+ chan
->chid
, ++fctx
->sequence
);
232 fence_init(&fence
->base
, &nouveau_fence_ops_legacy
,
234 priv
->context_base
+ chan
->chid
, ++fctx
->sequence
);
236 trace_fence_emit(&fence
->base
);
237 ret
= fctx
->emit(fence
);
239 fence_get(&fence
->base
);
240 spin_lock_irq(&fctx
->lock
);
241 nouveau_fence_update(chan
, fctx
);
242 list_add_tail(&fence
->head
, &fctx
->pending
);
243 spin_unlock_irq(&fctx
->lock
);
250 nouveau_fence_done(struct nouveau_fence
*fence
)
252 if (fence
->base
.ops
== &nouveau_fence_ops_legacy
||
253 fence
->base
.ops
== &nouveau_fence_ops_uevent
) {
254 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
257 if (test_bit(FENCE_FLAG_SIGNALED_BIT
, &fence
->base
.flags
))
260 spin_lock_irqsave(&fctx
->lock
, flags
);
261 nouveau_fence_update(fence
->channel
, fctx
);
262 spin_unlock_irqrestore(&fctx
->lock
, flags
);
264 return fence_is_signaled(&fence
->base
);
268 nouveau_fence_wait_legacy(struct fence
*f
, bool intr
, long wait
)
270 struct nouveau_fence
*fence
= from_fence(f
);
271 unsigned long sleep_time
= NSEC_PER_MSEC
/ 1000;
272 unsigned long t
= jiffies
, timeout
= t
+ wait
;
274 while (!nouveau_fence_done(fence
)) {
279 if (wait
!= MAX_SCHEDULE_TIMEOUT
&& time_after_eq(t
, timeout
)) {
280 __set_current_state(TASK_RUNNING
);
284 __set_current_state(intr
? TASK_INTERRUPTIBLE
:
285 TASK_UNINTERRUPTIBLE
);
287 kt
= ktime_set(0, sleep_time
);
288 schedule_hrtimeout(&kt
, HRTIMER_MODE_REL
);
290 if (sleep_time
> NSEC_PER_MSEC
)
291 sleep_time
= NSEC_PER_MSEC
;
293 if (intr
&& signal_pending(current
))
297 __set_current_state(TASK_RUNNING
);
303 nouveau_fence_wait_busy(struct nouveau_fence
*fence
, bool intr
)
307 while (!nouveau_fence_done(fence
)) {
308 if (time_after_eq(jiffies
, fence
->timeout
)) {
313 __set_current_state(intr
?
315 TASK_UNINTERRUPTIBLE
);
317 if (intr
&& signal_pending(current
)) {
323 __set_current_state(TASK_RUNNING
);
328 nouveau_fence_wait(struct nouveau_fence
*fence
, bool lazy
, bool intr
)
333 return nouveau_fence_wait_busy(fence
, intr
);
335 ret
= fence_wait_timeout(&fence
->base
, intr
, 15 * HZ
);
345 nouveau_fence_sync(struct nouveau_bo
*nvbo
, struct nouveau_channel
*chan
)
347 struct nouveau_fence_chan
*fctx
= chan
->fence
;
348 struct fence
*fence
= NULL
;
349 struct reservation_object
*resv
= nvbo
->bo
.resv
;
350 struct reservation_object_list
*fobj
;
353 fence
= reservation_object_get_excl(resv
);
355 if (fence
&& !fence_is_signaled(fence
)) {
356 struct nouveau_fence
*f
= from_fence(fence
);
357 struct nouveau_channel
*prev
= f
->channel
;
360 ret
= fctx
->sync(f
, prev
, chan
);
362 ret
= nouveau_fence_wait(f
, true, true);
369 fobj
= reservation_object_get_list(resv
);
373 for (i
= 0; i
< fobj
->shared_count
&& !ret
; ++i
) {
374 fence
= rcu_dereference_protected(fobj
->shared
[i
],
375 reservation_object_held(resv
));
377 /* should always be true, for now */
378 if (!nouveau_local_fence(fence
, chan
->drm
))
379 ret
= fence_wait(fence
, true);
386 nouveau_fence_unref(struct nouveau_fence
**pfence
)
389 fence_put(&(*pfence
)->base
);
393 struct nouveau_fence
*
394 nouveau_fence_ref(struct nouveau_fence
*fence
)
397 fence_get(&fence
->base
);
402 nouveau_fence_new(struct nouveau_channel
*chan
, bool sysmem
,
403 struct nouveau_fence
**pfence
)
405 struct nouveau_fence
*fence
;
408 if (unlikely(!chan
->fence
))
411 fence
= kzalloc(sizeof(*fence
), GFP_KERNEL
);
415 fence
->sysmem
= sysmem
;
417 ret
= nouveau_fence_emit(fence
, chan
);
419 nouveau_fence_unref(&fence
);
/* fence_ops.get_driver_name: constant driver identifier. */
static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
{
	return "nouveau";
}
430 static const char *nouveau_fence_get_timeline_name(struct fence
*f
)
432 struct nouveau_fence
*fence
= from_fence(f
);
433 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
435 return fence
->channel
? fctx
->name
: "dead channel";
439 * In an ideal world, read would not assume the channel context is still alive.
440 * This function may be called from another device, running into free memory as a
441 * result. The drm node should still be there, so we can derive the index from
444 static bool nouveau_fence_is_signaled(struct fence
*f
)
446 struct nouveau_fence
*fence
= from_fence(f
);
447 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
448 struct nouveau_channel
*chan
= fence
->channel
;
450 return (int)(fctx
->read(chan
) - fence
->base
.seqno
) >= 0;
453 static bool nouveau_fence_no_signaling(struct fence
*f
)
455 struct nouveau_fence
*fence
= from_fence(f
);
458 * caller should have a reference on the fence,
459 * else fence could get freed here
461 WARN_ON(atomic_read(&fence
->base
.refcount
.refcount
) <= 1);
464 * This needs uevents to work correctly, but fence_add_callback relies on
465 * being able to enable signaling. It will still get signaled eventually,
466 * just not right away.
468 if (nouveau_fence_is_signaled(f
)) {
469 list_del(&fence
->head
);
471 fence_put(&fence
->base
);
478 static const struct fence_ops nouveau_fence_ops_legacy
= {
479 .get_driver_name
= nouveau_fence_get_get_driver_name
,
480 .get_timeline_name
= nouveau_fence_get_timeline_name
,
481 .enable_signaling
= nouveau_fence_no_signaling
,
482 .signaled
= nouveau_fence_is_signaled
,
483 .wait
= nouveau_fence_wait_legacy
,
487 static bool nouveau_fence_enable_signaling(struct fence
*f
)
489 struct nouveau_fence
*fence
= from_fence(f
);
490 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
493 if (!fctx
->notify_ref
++)
494 nvif_notify_get(&fctx
->notify
);
496 ret
= nouveau_fence_no_signaling(f
);
498 set_bit(FENCE_FLAG_USER_BITS
, &fence
->base
.flags
);
499 else if (!--fctx
->notify_ref
)
500 nvif_notify_put(&fctx
->notify
);
505 static const struct fence_ops nouveau_fence_ops_uevent
= {
506 .get_driver_name
= nouveau_fence_get_get_driver_name
,
507 .get_timeline_name
= nouveau_fence_get_timeline_name
,
508 .enable_signaling
= nouveau_fence_enable_signaling
,
509 .signaled
= nouveau_fence_is_signaled
,
510 .wait
= fence_default_wait
,