/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

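/*
 * A QXL ring lives in memory shared with the device: a qxl_ring_header
 * (producer/consumer indices plus notify thresholds) followed by a
 * power-of-two number of fixed-size elements (indices are masked with
 * n_elements - 1).  struct qxl_ring is the driver-side wrapper that adds
 * a lock, the notify I/O port and a wait queue used while waiting for
 * free space.
 */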
struct ring {
	struct qxl_ring_header header;
	uint8_t elements[0];
};

struct qxl_ring {
	struct ring *ring;
	int element_size;
	int n_elements;
	int prod_notify;
	wait_queue_head_t *push_event;
	spinlock_t lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}

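/*
 * Wrap a ring header that already lives in memory shared with the
 * device.  Nothing is allocated for the ring storage itself; only the
 * driver-side bookkeeping structure is kmalloc'ed here.
 */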
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}

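/*
 * Return non-zero if there is room for another element.  When the ring
 * is still full, notify_on_cons is armed so the other side notifies us
 * (waking push_event) once it has consumed an entry and freed a slot.
 */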
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

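/*
 * Push one element onto the ring.  If the ring is full, arm
 * notify_on_cons and wait for space: busy-wait when sleeping is not
 * allowed (drm_can_sleep() is false), otherwise sleep on push_event,
 * interruptibly if requested.  After copying the element in and
 * advancing prod, ring the doorbell by writing to the prod_notify I/O
 * port when prod reaches notify_on_prod.
 */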
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

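/*
 * Pop one element.  Non-blocking: returns false when the ring is
 * empty, after arming notify_on_prod so we are signalled when the next
 * entry is produced.
 */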
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		queue_work(qdev->gc_queue, &qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

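/*
 * Drain the release ring.  Each entry popped is the id of a release
 * chain; walk the chain via info->next, sanity-check the release type
 * and free every release encountered.  Returns the number of releases
 * freed.
 */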
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	int ret;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		QXL_INFO(qdev, "popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			ret = qxl_release_reserve(qdev, release, false);
			if (ret) {
				qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
				DRM_ERROR("failed to reserve release %lld\n", id);
			}

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			qxl_release_unreserve(qdev, release);
			QXL_INFO(qdev, "popped %lld, next %lld\n", id,
				 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	QXL_INFO(qdev, "%s: %d\n", __func__, i);

	return i;
}

int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

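/*
 * Issue an asynchronous I/O-port command and wait (up to 5 seconds)
 * for the completion interrupt.  irq_received_io_cmd counts completion
 * interrupts; if a previously sent command is still outstanding, wait
 * for it first (bailing out on timeout, which is treated as the
 * hardware having gone away), then write the new command and wait for
 * its completion.  Returns 0 on success or timeout, -ERESTARTSYS if an
 * interruptible wait was interrupted.
 */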
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

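/*
 * Ask the device to update (render) a rectangle of a surface.  The
 * area is validated against the surface dimensions, written into the
 * shared ram header, and QXL_IO_UPDATE_AREA_ASYNC is issued; the wait
 * is interruptible, so -ERESTARTSYS may be returned to the caller.
 */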
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
		return -EINVAL;
	}
	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}

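/*
 * Create the primary surface: fill in the create_surface block of the
 * shared ram header from the BO's surface parameters and issue
 * QXL_IO_CREATE_PRIMARY_ASYNC.
 */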
void qxl_io_create_primary(struct qxl_device *qdev,
			   unsigned offset, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
		 qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, offset);

	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
		 bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * DO not do a DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb)
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
		   qdev->monitors_config ?
		   qdev->monitors_config->count : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].width : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].height : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].x : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].y : -1
		   );

	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

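/*
 * Allocate a surface id in surf_id_idr.  The id starts out mapped to
 * NULL and is only replaced with the BO once the hardware surface
 * exists (see qxl_hw_surface_alloc).  If the allocated id is beyond
 * what the device supports (rom->n_surfaces), drop it, reap up to two
 * existing surfaces and retry.
 */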
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

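/*
 * Create the hardware surface for a BO: build a QXL_SURFACE_CMD_CREATE
 * in a fresh release, point it at the BO's backing storage (via the
 * memslot for new_mem if given, otherwise the BO's current physical
 * address), push it on the command ring and publish the BO in
 * surf_id_idr.  The create release is remembered in surf->surf_create
 * and handed back to qxl_alloc_surface_release_reserved() when the
 * surface is destroyed.
 */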
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf,
			 struct ttm_mem_reg *new_mem)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	if (new_mem) {
		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

		/* TODO - need to hold one of the locks to read tbo.offset */
		cmd->u.surface_create.data = slot->high_bits;

		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
	} else
		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/*
	 * No need to add a release to the fence for this bo, since it is
	 * only released when we ask to destroy the surface and it would
	 * never signal otherwise.
	 */
	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);

	return 0;
}

int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/*
	 * If we are evicting, we need to make sure the surface is up
	 * to date.
	 */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret == -EBUSY)
		return -EBUSY;

	if (surf->fence.num_active_releases > 0 && stall == false) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	spin_lock(&surf->tbo.bdev->fence_lock);
	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
	spin_unlock(&surf->tbo.bdev->fence_lock);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret == -EBUSY) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

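/*
 * Try to free up to max_to_reap hardware surface ids.  Scan the idr
 * starting just after the most recently allocated id (so roughly the
 * least recently allocated surfaces are tried first) and evict any
 * surface that can be reserved.  The first pass is non-blocking; if
 * nothing could be reaped, a second pass stalls on outstanding fences.
 * After reaping, give the device a moment and run garbage collection
 * to recycle the releases.
 */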
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/*
		 * This avoids the case where the object is in the idr but
		 * has been evicted half way - it makes the idr lookup
		 * atomic with the eviction.
		 */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}