/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>
/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */

/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
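
/*
 * Indexed by the cur_idx computed in qxl_alloc_release_reserved():
 * 0 = drawable, 1 = surface cmd, 2 = cursor cmd.
 */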
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
	return true;
}

static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled_locked(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled_locked(fence))
			goto signaled;
	}

	if (fence_is_signaled_locked(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d "
					  "after spincount %d\n",
				   fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}
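
/*
 * These fences are signaled from qxl_release_free() once the device has
 * processed the release; the wait callback above just polls, prodding the
 * device (qxl_io_notify_oom, garbage collection) until then.
 */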
static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}
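
/* Drop each entry's bo reference and free the validate-list entries. */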
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct ttm_validate_buffer *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct ttm_validate_buffer, head);
		bo = to_qxl_bo(entry->bo);
		qxl_bo_unref(&bo);
		list_del(&entry->head);
		kfree(entry);
	}
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;

	/* pin releases bo's they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}
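
/*
 * Add a bo to the release's validate list. A reference is taken here and
 * dropped again by qxl_release_free_list(); duplicates are skipped.
 */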
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}
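
/*
 * Reserve and validate all bos on the release; on validation failure the
 * whole reservation is backed off so callers never see a partial state.
 */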
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* If there is only one object on the release it is the release itself;
	   since these objects are pinned, there is no need to reserve. */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* If there is only one object on the release it is the release itself;
	   since these objects are pinned, there is no need to reserve. */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
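
/*
 * For QXL_SURFACE_CMD_DESTROY the release is stashed 64 bytes after the
 * create command, so both surface commands share the create release's bo.
 */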
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);

		qxl_bo_unref(&bo);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}
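
/* Look up a release by id; returns NULL if the id is not in the idr. */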
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}
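
/* Undo qxl_release_map(): recover the mapped page pointer and kunmap it. */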
void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
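
/*
 * Called with all of the release's bos reserved: initialize and emit the
 * fence, add it as a shared fence on every bo, then drop the reservations.
 */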
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* If there is only one object on the release it is the release itself;
	   since these objects are pinned, there is no need to reserve. */
	if (list_is_singular(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		   release->id | 0xf0000000, release->base.seqno);
	trace_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	/* acquire release_lock to protect bo->resv->fence and its contents */
	spin_lock(&qdev->release_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		if (!entry->bo->sync_obj)
			entry->bo->sync_obj = qbo;

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&qdev->release_lock);
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}