/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

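/*
 * A qxl ring lives in device-visible memory: a qxl_ring_header shared with
 * the hardware, followed by a power-of-two array of fixed-size elements.
 * struct qxl_ring wraps it with the driver-side bookkeeping: the element
 * geometry, the I/O port to poke when the producer advances, and a lock
 * serialising access to the header.
 */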
struct ring {
	struct qxl_ring_header header;
	uint8_t elements[0];
};

struct qxl_ring {
	struct ring *ring;
	int element_size;
	int n_elements;
	int prod_notify;
	wait_queue_head_t *push_event;
	spinlock_t lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		header->notify_on_prod = ring->n_elements;
	spin_lock_init(&ring->lock);
	return ring;
}

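/*
 * Returns non-zero while there is room in the ring.  When the ring is full
 * we arm notify_on_cons so the device interrupts us as soon as it consumes
 * the next element, letting a sleeping pusher be woken up.
 */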
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

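/*
 * Push one element onto the ring.  If the ring is full we wait for the
 * device to consume an entry: busy-waiting when we cannot sleep, otherwise
 * sleeping on push_event, which is woken from the driver's interrupt path.
 * After writing the element we kick the prod_notify I/O port if the device
 * asked to be notified at this producer position.
 */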
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

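/*
 * Pop one element from the ring into 'element'.  Returns false if the ring
 * is empty, after arming notify_on_prod so the device interrupts us when
 * it next produces an entry.
 */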
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		queue_work(qdev->gc_queue, &qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

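/*
 * Drain the release ring.  Each popped entry is the id of a chain of
 * releases the device has finished with; we walk the chain via the 'next'
 * field of the mapped release info and free every release.  Returns the
 * number of releases freed.
 */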
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	int ret;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		QXL_INFO(qdev, "popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			ret = qxl_release_reserve(qdev, release, false);
			if (ret) {
				qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
				DRM_ERROR("failed to reserve release %lld\n", id);
			}

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			qxl_release_unreserve(qdev, release);
			QXL_INFO(qdev, "popped %lld, next %lld\n", id,
				 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	QXL_INFO(qdev, "%s: %d\n", __func__, i);

	return i;
}

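/*
 * Allocate a bo in VRAM and return it already reserved, so the caller can
 * go on to pin or map it without racing against eviction.
 */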
int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

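/*
 * Issue an asynchronous I/O command and wait (up to 5 seconds) for the
 * completion interrupt to bump irq_received_io_cmd.  If an earlier async
 * command is still outstanding we first wait for it to complete; a timeout
 * is treated as the "hw" having gone away.
 */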
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail, the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

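/*
 * Uninterruptible variant used by the synchronous I/O helpers below;
 * simply retries if the wait comes back interrupted.
 */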
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

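/*
 * Ask the device to update (render into) a rectangle of a surface.  The
 * rectangle is validated against the surface dimensions, written into the
 * shared ram header and handed to the device via QXL_IO_UPDATE_AREA_ASYNC;
 * update_area_mutex serialises use of the shared header fields.
 */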
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
		return -EINVAL;
	}
	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}

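/*
 * Point the device at a new primary surface.  The surface parameters are
 * written into the create_surface slot of the shared ram header, with the
 * pixel data living at 'offset' inside the given bo.
 */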
void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
			   unsigned height, unsigned offset, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
		 qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = width;
	create->height = height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, offset);

	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
		 bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * Do not do DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb).
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
		   qdev->monitors_config ?
		   qdev->monitors_config->count : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].width : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].height : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].x : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].y : -1
		   );

	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

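/*
 * Allocate a device surface id for 'surf'.  Ids come from an idr; the
 * device only supports rom->n_surfaces surfaces, so if we run past that
 * limit we reap a couple of idle surfaces and try again.  The bo itself is
 * installed in the idr later, once the hw surface actually exists.
 */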
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

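/*
 * Create the surface on the device by pushing a QXL_SURFACE_CMD_CREATE
 * down the command ring.  The release is remembered in surf->surf_create
 * so the later destroy can hang off it, and the bo is installed in the
 * surface idr once the command has been queued.
 */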
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf,
			 struct ttm_mem_reg *new_mem)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	if (new_mem) {
		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

		/* TODO - need to hold one of the locks to read tbo.offset */
		cmd->u.surface_create.data = slot->high_bits;

		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
	} else
		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

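/*
 * Destroy the device-side surface with QXL_SURFACE_CMD_DESTROY.  The bo is
 * dropped from the idr immediately, but the surface id itself is only
 * recycled once the destroy release comes back through the release ring.
 */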
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);

	return 0;
}

int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret == -EBUSY)
		return -EBUSY;

	if (surf->fence.num_active_releases > 0 && stall == false) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	spin_lock(&surf->tbo.bdev->fence_lock);
	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
	spin_unlock(&surf->tbo.bdev->fence_lock);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret == -EBUSY) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

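/*
 * Try to evict up to max_to_reap surfaces to free up surface ids, scanning
 * the idr starting just after the most recently allocated id.  If nothing
 * can be reaped without blocking, retry once in stalling mode, waiting for
 * the bos to go idle.
 */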
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted half way - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}